diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..8dd399ab
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 88
+extend-ignore = E203
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..c153adf5
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,59 @@
+# This workflow will install Python dependencies, run tests on a matrix of Python versions and lint with a single version of Python
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: CI
+
+on:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    branches: [ "master" ]
+
+permissions:
+  contents: read
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python 3.10
+        uses: actions/setup-python@v3
+        with:
+          python-version: "3.10"
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install poetry
+          poetry export --with dev > requirements.txt
+          pip install -r requirements.txt
+      - name: Lint with flake8
+        run: |
+          # stop the build if there are Python syntax errors or undefined names
+          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.8", "3.9", "3.10"]
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install poetry
+          poetry export --with dev > requirements.txt
+          pip install -r requirements.txt
+      - name: Test with pytest
+        run: |
+          pytest --cov=pygam --cov-report=xml
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v3
diff --git a/.gitignore b/.gitignore
index 07f80bd8..a30b5eb4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@
 ######################
 *.log
 .coverage
+coverage.xml
 
 # OS generated files #
 ######################
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 686677d6..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-language: python
-cache: pip
-python:
-  - "2.7"
-  - "3.6"
-
-install:
-  - sudo apt-get update
-
-  # We do this conditionally because it saves us some downloading if the
-  # version is the same.
- - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then - wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; - else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; - fi - - bash miniconda.sh -b -p $HOME/miniconda - - export PATH="$HOME/miniconda/bin:$PATH" - - hash -r - - conda config --set always_yes yes --set changeps1 no - - # add conda forge in order to find scikit-sparse - - conda config --add channels conda-forge - - conda update -q conda - - - conda install pytest numpy pandas scipy pytest-cov cython scikit-sparse - - # package reqs - - pip install --upgrade pip - - pip install -r requirements.txt - -script: - - py.test --cov=pygam - -after_success: - - codecov diff --git a/doc/source/conf.py b/doc/source/conf.py index 6f5e9717..92c45e70 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -14,6 +14,7 @@ # import os import sys + sys.path.insert(0, os.path.abspath('../../')) @@ -49,7 +50,7 @@ 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', - 'nbsphinx' + 'nbsphinx', ] # Add any paths that contain templates here, relative to this directory. @@ -137,15 +138,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -155,8 +153,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'pyGAM.tex', 'pyGAM Documentation', - 'Daniel Servén', 'manual'), + (master_doc, 'pyGAM.tex', 'pyGAM Documentation', 'Daniel Servén', 'manual'), ] @@ -164,10 +161,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [ - (master_doc, 'pygam', 'pyGAM Documentation', - [author], 1) -] +man_pages = [(master_doc, 'pygam', 'pyGAM Documentation', [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -176,9 +170,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'pyGAM', 'pyGAM Documentation', - author, 'pyGAM', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + 'pyGAM', + 'pyGAM Documentation', + author, + 'pyGAM', + 'One line description of project.', + 'Miscellaneous', + ), ] diff --git a/gen_imgs.py b/gen_imgs.py index 66508542..3b7c6ac0 100644 --- a/gen_imgs.py +++ b/gen_imgs.py @@ -1,14 +1,23 @@ """ generate some plots for the pyGAM repo """ -import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib.font_manager import FontProperties -from mpl_toolkits import mplot3d -from pygam import * -from pygam.datasets import hepatitis, wage, faithful, mcycle, trees, default, cake, toy_classification, toy_interaction, chicago +from pygam import GAM, LinearGAM, PoissonGAM, LogisticGAM, ExpectileGAM, s, f, te +from pygam.datasets import ( + hepatitis, + wage, + faithful, + mcycle, + trees, + default, + cake, + toy_classification, + toy_interaction, + chicago, +) np.random.seed(420) fontP = FontProperties() @@ -21,28 +30,30 @@ # monotonic increasing, concave constraint on hep data # prediction intervals on motorcycle data + def gen_basis_fns(): X, y = hepatitis() - gam = LinearGAM(lam=.6, fit_intercept=False).fit(X, y) + gam = LinearGAM(lam=0.6, fit_intercept=False).fit(X, y) XX = gam.generate_X_grid(term=0, n=500) plt.figure() - fig, ax = plt.subplots(2,1) - ax[0].plot(XX, gam._modelmat(XX, term=0).A); + fig, ax = plt.subplots(2, 1) + ax[0].plot(XX, gam._modelmat(XX, term=0).A) ax[0].set_title('b-Spline Basis Functions') ax[1].scatter(X, y, facecolor='gray', edgecolors='none') - ax[1].plot(XX, gam._modelmat(XX).A * gam.coef_); + ax[1].plot(XX, gam._modelmat(XX).A * gam.coef_) ax[1].plot(XX, gam.predict(XX), 'k') ax[1].set_title('Fitted Model') fig.tight_layout() plt.savefig('imgs/pygam_basis.png', dpi=300) + def cake_data_in_one(): X, y = cake() gam = LinearGAM(fit_intercept=True) - gam.gridsearch(X,y) + gam.gridsearch(X, y) XX = gam.generate_X_grid() @@ -51,17 +62,19 @@ def cake_data_in_one(): plt.title('LinearGAM') plt.savefig('imgs/pygam_cake_data.png', dpi=300) + def faithful_data_poisson(): X, y = faithful() gam = PoissonGAM().gridsearch(X, y) plt.figure() - plt.hist(faithful(return_X_y=False)['eruptions'], bins=200, color='k'); + plt.hist(faithful(return_X_y=False)['eruptions'], bins=200, color='k') plt.plot(X, gam.predict(X), color='r') plt.title('Best Lambda: {0:.2f}'.format(gam.lam[0][0])) plt.savefig('imgs/pygam_poisson.png', dpi=300) + def single_data_linear(): X, y = mcycle() @@ -75,6 +88,7 @@ def single_data_linear(): plt.title('Best Lambda: {0:.2f}'.format(gam.lam)) plt.savefig('imgs/pygam_single_pred_linear.png', dpi=300) + def mcycle_data_linear(): X, y = mcycle() @@ -85,12 +99,11 @@ def mcycle_data_linear(): plt.figure() plt.scatter(X, y, facecolor='gray', edgecolors='none') plt.plot(XX, gam.predict(XX), 'r--') - plt.plot(XX, gam.prediction_intervals(XX, width=.95), color='b', ls='--') + plt.plot(XX, gam.prediction_intervals(XX, width=0.95), color='b', ls='--') plt.title('95% prediction interval') plt.savefig('imgs/pygam_mcycle_data_linear.png', dpi=300) - m = X.min() M = X.max() XX = np.linspace(m - 10, M + 
10, 500) @@ -106,28 +119,34 @@ def mcycle_data_linear(): plt.savefig('imgs/pygam_mcycle_data_extrapolation.png', dpi=300) + def wage_data_linear(): X, y = wage() gam = LinearGAM(s(0) + s(1) + f(2)) - gam.gridsearch(X, y, lam=np.logspace(-5,3,50)) + gam.gridsearch(X, y, lam=np.logspace(-5, 3, 50)) plt.figure() - fig, axs = plt.subplots(1,3) + fig, axs = plt.subplots(1, 3) titles = ['year', 'age', 'education'] for i, ax in enumerate(axs): XX = gam.generate_X_grid(term=i) ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX)) - ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX, width=.95)[1], - c='r', ls='--') + ax.plot( + XX[:, i], + gam.partial_dependence(term=i, X=XX, width=0.95)[1], + c='r', + ls='--', + ) if i == 0: - ax.set_ylim(-30,30); + ax.set_ylim(-30, 30) ax.set_title(titles[i]) fig.tight_layout() plt.savefig('imgs/pygam_wage_data_linear.png', dpi=300) + def default_data_logistic(): X, y = default() @@ -135,20 +154,25 @@ def default_data_logistic(): gam.gridsearch(X, y) plt.figure() - fig, axs = plt.subplots(1,3) + fig, axs = plt.subplots(1, 3) titles = ['student', 'balance', 'income'] for i, ax in enumerate(axs): XX = gam.generate_X_grid(term=i) ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX)) - ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX, width=.95)[1], - c='r', ls='--') + ax.plot( + XX[:, i], + gam.partial_dependence(term=i, X=XX, width=0.95)[1], + c='r', + ls='--', + ) ax.set_title(titles[i]) fig.tight_layout() plt.savefig('imgs/pygam_default_data_logistic.png', dpi=300) + def constraints(): X, y = hepatitis(return_X_y=True) @@ -167,6 +191,7 @@ def constraints(): fig.tight_layout() plt.savefig('imgs/pygam_constraints.png', dpi=300) + def trees_data_custom(): X, y = trees() gam = GAM(distribution='gamma', link='log') @@ -178,6 +203,7 @@ def trees_data_custom(): plt.ylabel('predicted volume') plt.savefig('imgs/pygam_custom.png', dpi=300) + # def gen_single_data(n=200): # """ # 1-dimensional Logistic problem @@ -186,7 +212,7 @@ def trees_data_custom(): # # log_odds = -.5*x**2 + 5 # p = 1/(1+np.exp(-log_odds)).squeeze() -# y = (np.random.rand(len(x)) < p).astype(np.int) +# y = (np.random.rand(len(x)) < p).astype(int) # # lgam = LogisticGAM() # lgam.fit(x, y) @@ -247,18 +273,19 @@ def gen_multi_data(n=5000): plt.plot(lgam.logs_['deviance']) plt.savefig('imgs/pygam_multi_deviance.png', dpi=300) + def gen_tensor_data(): """ toy interaction data """ X, y = toy_interaction(return_X_y=True, n=10000) - gam = LinearGAM(te(0, 1,lam=0.1)).fit(X, y) + gam = LinearGAM(te(0, 1, lam=0.1)).fit(X, y) XX = gam.generate_X_grid(term=0, meshgrid=True) Z = gam.partial_dependence(term=0, meshgrid=True) - fig = plt.figure(figsize=(9,6)) + fig = plt.figure(figsize=(9, 6)) ax = plt.axes(projection='3d') ax.dist = 7.5 ax.plot_surface(XX[0], XX[1], Z, cmap='viridis') @@ -266,6 +293,7 @@ def gen_tensor_data(): fig.tight_layout() plt.savefig('imgs/pygam_tensor.png', transparent=True, dpi=300) + def chicago_tensor(): """ chicago tensor diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..36cbadea --- /dev/null +++ b/poetry.lock @@ -0,0 +1,2055 @@ +# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. 
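+# Note: CI consumes this lock indirectly — the workflow above runs
+# `poetry export --with dev > requirements.txt` and pip-installs the export,
+# so the versions pinned here are the ones used in CI.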
+ +[[package]] +name = "alabaster" +version = "0.7.13" +description = "A configurable sidebar-enabled Sphinx theme" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, + {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, +] + +[[package]] +name = "appnope" +version = "0.1.3" +description = "Disable App Nap on macOS >= 10.9" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, + {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, +] + +[[package]] +name = "asttokens" +version = "2.2.1" +description = "Annotate AST trees with source code positions" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"}, + {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"}, +] + +[package.dependencies] +six = "*" + +[package.extras] +test = ["astroid", "pytest"] + +[[package]] +name = "attrs" +version = "22.2.0" +description = "Classes Without Boilerplate" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, + {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] +tests = ["attrs[tests-no-zope]", "zope.interface"] +tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] + +[[package]] +name = "babel" +version = "2.12.1" +description = "Internationalization utilities" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, + {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, +] + +[package.dependencies] +pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} + +[[package]] +name = "backcall" +version = "0.2.0" +description = "Specifications for callback functions passed in to an API" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, + {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, +] + +[[package]] +name = "beautifulsoup4" +version = "4.11.2" +description = "Screen-scraping library" +category = "dev" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = 
"beautifulsoup4-4.11.2-py3-none-any.whl", hash = "sha256:0e79446b10b3ecb499c1556f7e228a53e64a2bfcebd455f370d8927cb5b59e39"}, + {file = "beautifulsoup4-4.11.2.tar.gz", hash = "sha256:bc4bdda6717de5a2987436fb8d72f45dc90dd856bdfd512a1314ce90349a0106"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "black" +version = "23.1.0" +description = "The uncompromising code formatter." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, + {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, + {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, + {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, + {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, + {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, + {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, + {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, + {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, + {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = 
"sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, + {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, + {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, + {file = "black-23.1.0-py3-none-any.whl", hash = "sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, + {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "bleach" +version = "6.0.0" +description = "An easy safelist-based HTML-sanitizing tool." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, + {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, +] + +[package.dependencies] +six = ">=1.9.0" +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.2)"] + +[[package]] +name = "certifi" +version = "2022.12.7" +description = "Python package for providing Mozilla's CA Bundle." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." 
+category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.1.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+category = "dev" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = 
"charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, +] + +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "codecov" +version = "2.1.12" +description = "Hosted coverage reports for GitHub, Bitbucket and Gitlab" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "codecov-2.1.12-py2.py3-none-any.whl", hash = "sha256:585dc217dc3d8185198ceb402f85d5cb5dbfa0c5f350a5abcdf9e347776a5b47"}, + {file = "codecov-2.1.12.tar.gz", hash = "sha256:a0da46bb5025426da895af90938def8ee12d37fcbcbbbc15b6dc64cf7ebc51c1"}, +] + +[package.dependencies] +coverage = "*" +requests = ">=2.7.9" + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.2.1" +description = "Code coverage measurement for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "coverage-7.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49567ec91fc5e0b15356da07a2feabb421d62f52a9fff4b1ec40e9e19772f5f8"}, + {file = "coverage-7.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2ef6cae70168815ed91388948b5f4fcc69681480a0061114db737f957719f03"}, + {file = "coverage-7.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3004765bca3acd9e015794e5c2f0c9a05587f5e698127ff95e9cfba0d3f29339"}, + {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cca7c0b7f5881dfe0291ef09ba7bb1582cb92ab0aeffd8afb00c700bf692415a"}, + {file = "coverage-7.2.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2167d116309f564af56f9aa5e75ef710ef871c5f9b313a83050035097b56820"}, + {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cb5f152fb14857cbe7f3e8c9a5d98979c4c66319a33cad6e617f0067c9accdc4"}, + {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:87dc37f16fb5e3a28429e094145bf7c1753e32bb50f662722e378c5851f7fdc6"}, + {file = "coverage-7.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e191a63a05851f8bce77bc875e75457f9b01d42843f8bd7feed2fc26bbe60833"}, + {file = "coverage-7.2.1-cp310-cp310-win32.whl", hash = "sha256:e3ea04b23b114572b98a88c85379e9e9ae031272ba1fb9b532aa934c621626d4"}, + {file = "coverage-7.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:0cf557827be7eca1c38a2480484d706693e7bb1929e129785fe59ec155a59de6"}, + {file = "coverage-7.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:570c21a29493b350f591a4b04c158ce1601e8d18bdcd21db136fbb135d75efa6"}, + {file = "coverage-7.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e872b082b32065ac2834149dc0adc2a2e6d8203080501e1e3c3c77851b466f9"}, + {file = "coverage-7.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac6343bae03b176e9b58104a9810df3cdccd5cfed19f99adfa807ffbf43cf9b"}, + {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abacd0a738e71b20e224861bc87e819ef46fedba2fb01bc1af83dfd122e9c319"}, + {file = "coverage-7.2.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9256d4c60c4bbfec92721b51579c50f9e5062c21c12bec56b55292464873508"}, + {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80559eaf6c15ce3da10edb7977a1548b393db36cbc6cf417633eca05d84dd1ed"}, + {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bd7e628f6c3ec4e7d2d24ec0e50aae4e5ae95ea644e849d92ae4805650b4c4e"}, + {file = "coverage-7.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09643fb0df8e29f7417adc3f40aaf379d071ee8f0350ab290517c7004f05360b"}, + {file = "coverage-7.2.1-cp311-cp311-win32.whl", hash = 
"sha256:1b7fb13850ecb29b62a447ac3516c777b0e7a09ecb0f4bb6718a8654c87dfc80"}, + {file = "coverage-7.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:617a94ada56bbfe547aa8d1b1a2b8299e2ec1ba14aac1d4b26a9f7d6158e1273"}, + {file = "coverage-7.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8649371570551d2fd7dee22cfbf0b61f1747cdfb2b7587bb551e4beaaa44cb97"}, + {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d2b9b5e70a21474c105a133ba227c61bc95f2ac3b66861143ce39a5ea4b3f84"}, + {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82c988954722fa07ec5045c57b6d55bc1a0890defb57cf4a712ced65b26ddd"}, + {file = "coverage-7.2.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:861cc85dfbf55a7a768443d90a07e0ac5207704a9f97a8eb753292a7fcbdfcfc"}, + {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0339dc3237c0d31c3b574f19c57985fcbe494280153bbcad33f2cdf469f4ac3e"}, + {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5928b85416a388dd557ddc006425b0c37e8468bd1c3dc118c1a3de42f59e2a54"}, + {file = "coverage-7.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d3843ca645f62c426c3d272902b9de90558e9886f15ddf5efe757b12dd376f5"}, + {file = "coverage-7.2.1-cp37-cp37m-win32.whl", hash = "sha256:6a034480e9ebd4e83d1aa0453fd78986414b5d237aea89a8fdc35d330aa13bae"}, + {file = "coverage-7.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6fce673f79a0e017a4dc35e18dc7bb90bf6d307c67a11ad5e61ca8d42b87cbff"}, + {file = "coverage-7.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f099da6958ddfa2ed84bddea7515cb248583292e16bb9231d151cd528eab657"}, + {file = "coverage-7.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:97a3189e019d27e914ecf5c5247ea9f13261d22c3bb0cfcfd2a9b179bb36f8b1"}, + {file = "coverage-7.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a81dbcf6c6c877986083d00b834ac1e84b375220207a059ad45d12f6e518a4e3"}, + {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d2c3dde4c0b9be4b02067185136b7ee4681978228ad5ec1278fa74f5ca3e99"}, + {file = "coverage-7.2.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a209d512d157379cc9ab697cbdbb4cfd18daa3e7eebaa84c3d20b6af0037384"}, + {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f3d07edb912a978915576a776756069dede66d012baa503022d3a0adba1b6afa"}, + {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8dca3c1706670297851bca1acff9618455122246bdae623be31eca744ade05ec"}, + {file = "coverage-7.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b1991a6d64231a3e5bbe3099fb0dd7c9aeaa4275ad0e0aeff4cb9ef885c62ba2"}, + {file = "coverage-7.2.1-cp38-cp38-win32.whl", hash = "sha256:22c308bc508372576ffa3d2dbc4824bb70d28eeb4fcd79d4d1aed663a06630d0"}, + {file = "coverage-7.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:b0c0d46de5dd97f6c2d1b560bf0fcf0215658097b604f1840365296302a9d1fb"}, + {file = "coverage-7.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4dd34a935de268a133e4741827ae951283a28c0125ddcdbcbba41c4b98f2dfef"}, + {file = "coverage-7.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0f8318ed0f3c376cfad8d3520f496946977abde080439d6689d7799791457454"}, + {file = 
"coverage-7.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:834c2172edff5a08d78e2f53cf5e7164aacabeb66b369f76e7bb367ca4e2d993"}, + {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4d70c853f0546855f027890b77854508bdb4d6a81242a9d804482e667fff6e6"}, + {file = "coverage-7.2.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a6450da4c7afc4534305b2b7d8650131e130610cea448ff240b6ab73d7eab63"}, + {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:99f4dd81b2bb8fc67c3da68b1f5ee1650aca06faa585cbc6818dbf67893c6d58"}, + {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bdd3f2f285ddcf2e75174248b2406189261a79e7fedee2ceeadc76219b6faa0e"}, + {file = "coverage-7.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f29351393eb05e6326f044a7b45ed8e38cb4dcc38570d12791f271399dc41431"}, + {file = "coverage-7.2.1-cp39-cp39-win32.whl", hash = "sha256:e2b50ebc2b6121edf352336d503357321b9d8738bb7a72d06fc56153fd3f4cd8"}, + {file = "coverage-7.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd5a12239c0006252244f94863f1c518ac256160cd316ea5c47fb1a11b25889a"}, + {file = "coverage-7.2.1-pp37.pp38.pp39-none-any.whl", hash = "sha256:436313d129db7cf5b4ac355dd2bd3f7c7e5294af077b090b85de75f8458b8616"}, + {file = "coverage-7.2.1.tar.gz", hash = "sha256:c77f2a9093ccf329dd523a9b2b3c854c20d2a3d968b6def3b820272ca6732242"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + +[[package]] +name = "docutils" +version = "0.18.1" +description = "Docutils -- Python Documentation Utilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"}, + {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +description = "Backport of PEP 654 (exception groups)" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, 
+] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "executing" +version = "1.2.0" +description = "Get the currently executing AST node of a frame, and other information" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"}, + {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"}, +] + +[package.extras] +tests = ["asttokens", "littleutils", "pytest", "rich"] + +[[package]] +name = "fastjsonschema" +version = "2.16.3" +description = "Fastest Python implementation of JSON schema" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "fastjsonschema-2.16.3-py3-none-any.whl", hash = "sha256:04fbecc94300436f628517b05741b7ea009506ce8f946d40996567c669318490"}, + {file = "fastjsonschema-2.16.3.tar.gz", hash = "sha256:4a30d6315a68c253cfa8f963b9697246315aa3db89f98b97235e345dedfb0b8e"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "flake8" +version = "6.0.0" +description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-6.0.0-py2.py3-none-any.whl", hash = "sha256:3833794e27ff64ea4e9cf5d410082a8b97ff1a06c16aa3d2027339cd0f1195c7"}, + {file = "flake8-6.0.0.tar.gz", hash = "sha256:c61007e76655af75e6785a931f452915b371dc48f56efd765247c8fe68f2b181"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.10.0,<2.11.0" +pyflakes = ">=3.0.0,<3.1.0" + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +description = "Getting image size from png/jpeg/jpeg2000/gif file" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, + {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.0.0" +description = "Read metadata from Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, + {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", 
"pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] + +[[package]] +name = "importlib-resources" +version = "5.12.0" +description = "Read resources from Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, + {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "ipython" +version = "8.11.0" +description = "IPython: Productive Interactive Computing" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipython-8.11.0-py3-none-any.whl", hash = "sha256:5b54478e459155a326bf5f42ee4f29df76258c0279c36f21d71ddb560f88b156"}, + {file = "ipython-8.11.0.tar.gz", hash = "sha256:735cede4099dbc903ee540307b9171fbfef4aa75cfcacc5a273b2cda2f02be04"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "sys_platform == \"darwin\""} +backcall = "*" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} +pickleshare = "*" +prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5" + +[package.extras] +all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] +black = ["black"] +doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] +test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] + +[[package]] +name = "jedi" +version = "0.18.2" +description = "An autocompletion tool for Python that can be used for text editors." 
+category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"}, + {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"}, +] + +[package.dependencies] +parso = ">=0.8.0,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jsonschema" +version = "4.17.3" +description = "An implementation of JSON Schema validation for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, + {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, +] + +[package.dependencies] +attrs = ">=17.4.0" +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jupyter-client" +version = "8.0.3" +description = "Jupyter protocol implementation and client libraries" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_client-8.0.3-py3-none-any.whl", hash = "sha256:be48ac6bd659cbbddb7a674cf06b3b8afbf53f228253cf58bde604c03bd487b0"}, + {file = "jupyter_client-8.0.3.tar.gz", hash = "sha256:ed65498bea6d876ef9d8da3e0db3dd33c5d129f5b2645f56ae03993782966bd0"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", 
"sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["codecov", "coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.2.0" +description = "Jupyter core package. A base package on which Jupyter projects rely." +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.2.0-py3-none-any.whl", hash = "sha256:4bdc2928c37f6917130c667d8b8708f20aee539d8283c6be72aabd2a4b4c83b0"}, + {file = "jupyter_core-5.2.0.tar.gz", hash = "sha256:1407cdb4c79ee467696c04b76633fc1884015fa109323365a6372c8e890cc83f"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=1.0", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyterlab-pygments" +version = "0.2.2" +description = "Pygments theme using JupyterLab CSS variables" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"}, + {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, +] + +[[package]] +name = "markupsafe" +version = "2.1.2" +description = "Safely add untrusted strings to HTML/XML markup." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, + {file = 
"MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, + {file = 
"MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, + {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.6" +description = "Inline Matplotlib backend for Jupyter" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, + {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mistune" +version = 
"2.0.5" +description = "A sane Markdown parser with useful plugins and renderers" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "mistune-2.0.5-py2.py3-none-any.whl", hash = "sha256:bad7f5d431886fcbaf5f758118ecff70d31f75231b34024a1341120340a65ce8"}, + {file = "mistune-2.0.5.tar.gz", hash = "sha256:0246113cb2492db875c6be56974a7c893333bf26cd92891c85f63151cee09d34"}, +] + +[[package]] +name = "mock" +version = "5.0.1" +description = "Rolling backport of unittest.mock for all Pythons" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mock-5.0.1-py3-none-any.whl", hash = "sha256:c41cfb1e99ba5d341fbcc5308836e7d7c9786d302f995b2c271ce2144dece9eb"}, + {file = "mock-5.0.1.tar.gz", hash = "sha256:e3ea505c03babf7977fd21674a69ad328053d414f05e6433c30d8fa14a534a6b"}, +] + +[package.extras] +build = ["blurb", "twine", "wheel"] +docs = ["sphinx"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nbclient" +version = "0.7.2" +description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." +category = "dev" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "nbclient-0.7.2-py3-none-any.whl", hash = "sha256:d97ac6257de2794f5397609df754fcbca1a603e94e924eb9b99787c031ae2e7c"}, + {file = "nbclient-0.7.2.tar.gz", hash = "sha256:884a3f4a8c4fc24bb9302f263e0af47d97f0d01fe11ba714171b320c8ac09547"}, +] + +[package.dependencies] +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +nbformat = ">=5.1" +traitlets = ">=5.3" + +[package.extras] +dev = ["pre-commit"] +docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme"] +test = ["ipykernel", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] + +[[package]] +name = "nbconvert" +version = "7.2.9" +description = "Converting Jupyter Notebooks" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "nbconvert-7.2.9-py3-none-any.whl", hash = "sha256:495638c5e06005f4a5ce828d8a81d28e34f95c20f4384d5d7a22254b443836e7"}, + {file = "nbconvert-7.2.9.tar.gz", hash = "sha256:a42c3ac137c64f70cbe4d763111bf358641ea53b37a01a5c202ed86374af5234"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +bleach = "*" +defusedxml = "*" +importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} +jinja2 = ">=3.0" +jupyter-core = ">=4.7" +jupyterlab-pygments = "*" +markupsafe = ">=2.0" +mistune = ">=2.0.3,<3" +nbclient = ">=0.5.0" +nbformat = ">=5.1" +packaging = "*" +pandocfilters = ">=1.4.1" +pygments = ">=2.4.1" +tinycss2 = "*" +traitlets = ">=5.0" + +[package.extras] +all = ["nbconvert[docs,qtpdf,serve,test,webpdf]"] +docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] +qtpdf = ["nbconvert[qtpng]"] +qtpng = ["pyqtwebengine (>=5.15)"] +serve = ["tornado (>=6.1)"] +test = ["ipykernel", "ipywidgets (>=7)", 
"pre-commit", "pytest", "pytest-dependency"] +webpdf = ["pyppeteer (>=1,<1.1)"] + +[[package]] +name = "nbformat" +version = "5.7.3" +description = "The Jupyter Notebook format" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "nbformat-5.7.3-py3-none-any.whl", hash = "sha256:22a98a6516ca216002b0a34591af5bcb8072ca6c63910baffc901cfa07fefbf0"}, + {file = "nbformat-5.7.3.tar.gz", hash = "sha256:4b021fca24d3a747bf4e626694033d792d594705829e5e35b14ee3369f9f6477"}, +] + +[package.dependencies] +fastjsonschema = "*" +jsonschema = ">=2.6" +jupyter-core = "*" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "nbsphinx" +version = "0.9.0" +description = "Jupyter Notebook Tools for Sphinx" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "nbsphinx-0.9.0-py3-none-any.whl", hash = "sha256:09d578cf2dfe38ed8caf680d8b6b66aa2da475f8e2daff3a14757b3df5631752"}, + {file = "nbsphinx-0.9.0.tar.gz", hash = "sha256:996d09770b5aee2a9aba4dc690e2eff2380aab7b57a8b954fab70d067a8a6e55"}, +] + +[package.dependencies] +docutils = "*" +jinja2 = "*" +nbconvert = "!=5.4" +nbformat = "*" +sphinx = ">=1.8" +traitlets = ">=5" + +[[package]] +name = "numpy" +version = "1.24.2" +description = "Fundamental package for array computing in Python" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"}, + {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"}, + {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"}, + {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"}, + {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"}, + {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"}, + {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"}, + {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"}, + {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"}, + {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"}, + {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"}, + {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"}, + {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"}, + {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"}, + {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"}, + {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"}, + {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"}, + {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"}, + {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"}, + {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"}, + {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"}, + {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"}, + {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"}, + {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"}, + {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"}, +] + +[[package]] +name = "packaging" +version = "23.0" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, + {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, +] + +[[package]] +name = "pandas" +version = "1.5.3" +description = "Powerful data structures for data analysis, time series, and statistics" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {file 
= "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, +] +python-dateutil = ">=2.8.1" +pytz = ">=2020.1" + +[package.extras] +test = ["hypothesis (>=5.5.3)", "pytest 
(>=6.0)", "pytest-xdist (>=1.31)"] + +[[package]] +name = "pandocfilters" +version = "1.5.0" +description = "Utilities for writing pandoc filters in python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, + {file = "pandocfilters-1.5.0.tar.gz", hash = "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38"}, +] + +[[package]] +name = "parso" +version = "0.8.3" +description = "A Python Parser" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, + {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, +] + +[package.extras] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["docopt", "pytest (<6.0.0)"] + +[[package]] +name = "pathspec" +version = "0.11.0" +description = "Utility library for gitignore style pattern matching of file paths." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.0-py3-none-any.whl", hash = "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229"}, + {file = "pathspec-0.11.0.tar.gz", hash = "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"}, +] + +[[package]] +name = "pexpect" +version = "4.8.0" +description = "Pexpect allows easy control of interactive console applications." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, + {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pickleshare" +version = "0.7.5" +description = "Tiny 'shelve'-like database with concurrency support" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, + {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, +] + +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + +[[package]] +name = "platformdirs" +version = "3.1.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, + {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, +] + +[package.extras] +docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] + +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pockets" +version = "0.9.1" +description = "A collection of helpful Python tools!" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pockets-0.9.1-py2.py3-none-any.whl", hash = "sha256:68597934193c08a08eb2bf6a1d85593f627c22f9b065cc727a4f03f669d96d86"}, + {file = "pockets-0.9.1.tar.gz", hash = "sha256:9320f1a3c6f7a9133fe3b571f283bcf3353cd70249025ae8d618e40e9f7e92b3"}, +] + +[package.dependencies] +six = ">=1.5.2" + +[[package]] +name = "progressbar2" +version = "4.2.0" +description = "A Python Progressbar library to provide visual (yet text based) progress to long running operations." +category = "main" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "progressbar2-4.2.0-py2.py3-none-any.whl", hash = "sha256:1a8e201211f99a85df55f720b3b6da7fb5c8cdef56792c4547205be2de5ea606"}, + {file = "progressbar2-4.2.0.tar.gz", hash = "sha256:1393922fcb64598944ad457569fbeb4b3ac189ef50b5adb9cef3284e87e394ce"}, +] + +[package.dependencies] +python-utils = ">=3.0.0" + +[package.extras] +docs = ["sphinx (>=1.8.5)"] +tests = ["flake8 (>=3.7.7)", "freezegun (>=0.3.11)", "pytest (>=4.6.9)", "pytest-cov (>=2.6.1)", "pytest-mypy", "sphinx (>=1.8.5)"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.38" +description = "Library for building powerful interactive command lines in Python" +category = "dev" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.38-py3-none-any.whl", hash = "sha256:45ea77a2f7c60418850331366c81cf6b5b9cf4c7fd34616f733c5427e6abbb1f"}, + {file = "prompt_toolkit-3.0.38.tar.gz", hash = "sha256:23ac5d50538a9a38c8bde05fecb47d0b403ecd0662857a86f886f798563d5b9b"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.2" +description = "Safely evaluate AST nodes without side effects" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pure_eval-0.2.2-py3-none-any.whl", hash = 
"sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, + {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "pycodestyle" +version = "2.10.0" +description = "Python style guide checker" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pycodestyle-2.10.0-py2.py3-none-any.whl", hash = "sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610"}, + {file = "pycodestyle-2.10.0.tar.gz", hash = "sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pyflakes" +version = "3.0.1" +description = "passive checker of Python programs" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyflakes-3.0.1-py2.py3-none-any.whl", hash = "sha256:ec55bf7fe21fff7f1ad2f7da62363d749e2a470500eab1b555334b67aa1ef8cf"}, + {file = "pyflakes-3.0.1.tar.gz", hash = "sha256:ec8b276a6b60bd80defed25add7e439881c19e64850afd9b346283d4165fd0fd"}, +] + +[[package]] +name = "pygments" +version = "2.14.0" +description = "Pygments is a syntax highlighting package written in Python." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"}, + {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"}, +] + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pyrsistent" +version = "0.19.3" +description = "Persistent/Functional/Immutable data structures" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, + {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, + {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, + {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, + {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, + {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, +] + +[[package]] +name = "pytest" +version = "7.2.2" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, + {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} 
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "4.0.0" +description = "Pytest plugin for measuring coverage." +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest-cov-4.0.0.tar.gz", hash = "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470"}, + {file = "pytest_cov-4.0.0-py3-none-any.whl", hash = "sha256:2feb1b751d66a8bd934e5edfa2e961d11309dc37b73b0eabe73b5945fee20f6b"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-utils" +version = "3.5.2" +description = "Python Utils is a module with some convenient utilities not included with the standard Python install" +category = "main" +optional = false +python-versions = ">3.6.0" +files = [ + {file = "python-utils-3.5.2.tar.gz", hash = "sha256:68198854fc276bc4b2403b261703c218e01ef564dcb072a7096ed9ea7aa5130c"}, + {file = "python_utils-3.5.2-py2.py3-none-any.whl", hash = "sha256:8bfefc3430f1c48408fa0e5958eee51d39840a5a987c2181a579e99ab6fe5ca6"}, +] + +[package.extras] +docs = ["mock", "python-utils", "sphinx"] +loguru = ["loguru"] +tests = ["flake8", "loguru", "pytest", "pytest-asyncio", "pytest-cov", "pytest-mypy", "sphinx", "types-setuptools"] + +[[package]] +name = "pytz" +version = "2022.7.1" +description = "World timezone definitions, modern and historical" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, + {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, +] + +[[package]] +name = "pywin32" +version = "305" +description = "Python for Window Extensions" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-305-cp310-cp310-win32.whl", hash = "sha256:421f6cd86e84bbb696d54563c48014b12a23ef95a14e0bdba526be756d89f116"}, + {file = "pywin32-305-cp310-cp310-win_amd64.whl", hash = "sha256:73e819c6bed89f44ff1d690498c0a811948f73777e5f97c494c152b850fad478"}, + {file = "pywin32-305-cp310-cp310-win_arm64.whl", hash = "sha256:742eb905ce2187133a29365b428e6c3b9001d79accdc30aa8969afba1d8470f4"}, + {file = "pywin32-305-cp311-cp311-win32.whl", hash = "sha256:19ca459cd2e66c0e2cc9a09d589f71d827f26d47fe4a9d09175f6aa0256b51c2"}, + {file = "pywin32-305-cp311-cp311-win_amd64.whl", hash = "sha256:326f42ab4cfff56e77e3e595aeaf6c216712bbdd91e464d167c6434b28d65990"}, + {file = "pywin32-305-cp311-cp311-win_arm64.whl", hash 
= "sha256:4ecd404b2c6eceaca52f8b2e3e91b2187850a1ad3f8b746d0796a98b4cea04db"}, + {file = "pywin32-305-cp36-cp36m-win32.whl", hash = "sha256:48d8b1659284f3c17b68587af047d110d8c44837736b8932c034091683e05863"}, + {file = "pywin32-305-cp36-cp36m-win_amd64.whl", hash = "sha256:13362cc5aa93c2beaf489c9c9017c793722aeb56d3e5166dadd5ef82da021fe1"}, + {file = "pywin32-305-cp37-cp37m-win32.whl", hash = "sha256:a55db448124d1c1484df22fa8bbcbc45c64da5e6eae74ab095b9ea62e6d00496"}, + {file = "pywin32-305-cp37-cp37m-win_amd64.whl", hash = "sha256:109f98980bfb27e78f4df8a51a8198e10b0f347257d1e265bb1a32993d0c973d"}, + {file = "pywin32-305-cp38-cp38-win32.whl", hash = "sha256:9dd98384da775afa009bc04863426cb30596fd78c6f8e4e2e5bbf4edf8029504"}, + {file = "pywin32-305-cp38-cp38-win_amd64.whl", hash = "sha256:56d7a9c6e1a6835f521788f53b5af7912090674bb84ef5611663ee1595860fc7"}, + {file = "pywin32-305-cp39-cp39-win32.whl", hash = "sha256:9d968c677ac4d5cbdaa62fd3014ab241718e619d8e36ef8e11fb930515a1e918"}, + {file = "pywin32-305-cp39-cp39-win_amd64.whl", hash = "sha256:50768c6b7c3f0b38b7fb14dd4104da93ebced5f1a50dc0e834594bff6fbe1271"}, +] + +[[package]] +name = "pyzmq" +version = "25.0.1" +description = "Python bindings for 0MQ" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyzmq-25.0.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:94f65e13e6df035b0ae90d49adfe7891aa4e7bdeaa65265729fecc04ab3eb0fe"}, + {file = "pyzmq-25.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f0399450d970990705ce47ed65f5efed3e4627dfc80628c3798100e7b72e023b"}, + {file = "pyzmq-25.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f29709b0431668a967d7ff0394b00a865e7b7dde827ee0a47938b705b7c4aec3"}, + {file = "pyzmq-25.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4fee9420b34c0ab426f105926a701a3d73f878fe77f07a1b92e0b78d1e2c795c"}, + {file = "pyzmq-25.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57be375c6bc66b0f685cd298e5c1c3d7ee34a254145b8087aed6e25db372b0f3"}, + {file = "pyzmq-25.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a3309b2c5a5be3d48c9ade77b340361764449aa22854ac65935b1e6c0cdabe2c"}, + {file = "pyzmq-25.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7574d24579e83ee8c5d3b14769e7ba895161c43a601e911dd89d449e545e00ad"}, + {file = "pyzmq-25.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:041d617091258133e602919b28fdce4d3e2f8aedcd1e8b34c599653bc288d59e"}, + {file = "pyzmq-25.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7897ba8c3fedc6b3023bad676ceb69dbf90c077ff18ae3133ba43db47417cc72"}, + {file = "pyzmq-25.0.1-cp310-cp310-win32.whl", hash = "sha256:c462f70dadbd4649e572ca7cd1e7cf3305a8c2afc53b84214c0a7c0c3af8a657"}, + {file = "pyzmq-25.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e3a721710992cf0e213bbb7be48fb0f32202e8d01f556c196c870373bb9ad4f4"}, + {file = "pyzmq-25.0.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:b0a0fcf56279b9f3acc9b36a83feb7640c51b0db444b6870e4406d002be1d514"}, + {file = "pyzmq-25.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95aff52fc847ea5755d2370f86e379ba2ed6eb67a0a6f90f0e8e99c553693b81"}, + {file = "pyzmq-25.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b55366e6c11e1ef7403d072b9867b62cf63eebd31dd038ef65bc8d65572854f6"}, + {file = "pyzmq-25.0.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:64a2bc72bcad705ee42a8fe877478ddadb7e260e806562833d3d814125e28a44"}, + {file = "pyzmq-25.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca66aa24422d7f324acd5cb7fc7df616eb6f0205e059393fb108702e33e90c7"}, + {file = "pyzmq-25.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:58d5dfec2e2befd09b04c4683b3c984d2203cf6e054d0f9786be3826737ad612"}, + {file = "pyzmq-25.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3549292d65987e422e2c9f105b1485448381f489d8a6b6b040fc8b8f497bd578"}, + {file = "pyzmq-25.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5b1ca8b0df50d1ac88857ffe9ebd1347e0a5bb5f6e1d99940fdd7df0ffdefb49"}, + {file = "pyzmq-25.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1a107e89cdcf799060ba4fa85fd3c942e19df7b24eb2600618b2406cc73c18e"}, + {file = "pyzmq-25.0.1-cp311-cp311-win32.whl", hash = "sha256:0f22ba4e9041549a5a3f5a545169dda52fa0aa7b5ef46b336cbe6679c4c3c134"}, + {file = "pyzmq-25.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:0644c0d5c73e4bfeee8148f638ab16ad783df1c4d6c2f968552a26a43fb002a1"}, + {file = "pyzmq-25.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c5eb4b17d73b1fc208a4faa6b5918983ccc961770aa37741891f61db302dae4e"}, + {file = "pyzmq-25.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:649dd55948144a108041397f07c1299086ce1c85c2e166831db3a33dac1d0c7f"}, + {file = "pyzmq-25.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c99fd8d3efc138d6a7fb1e822133f62bb18ffec66dc6d398dcb2ac2ab8eb2cb0"}, + {file = "pyzmq-25.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d72d69d4bb37c05a446d10bc40b391cf8fb7572654fb73fa69e7d2a395197e65"}, + {file = "pyzmq-25.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:036dbf8373aed4ccf56d58c561b23601b8f33919ec1093d8c77b37ac1259702d"}, + {file = "pyzmq-25.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:861c37649c75a2ecfc2034a32b9d5ca744e1e0cddcbf65afbd8027cf7d9755be"}, + {file = "pyzmq-25.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:92f04d63aecbb71d41f7db5f988167ef429f96d8197fd46684688cdb513e8a2e"}, + {file = "pyzmq-25.0.1-cp36-cp36m-win32.whl", hash = "sha256:866a4e918f1f4b2f83e9982b817df257910e3e50e456ffa74f141a10adcd11d1"}, + {file = "pyzmq-25.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:ec29c880b82cd38a63810a93b77e13f167e05732049101947772eed9ae805097"}, + {file = "pyzmq-25.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0241a334e39aa74e4ba0ae5f9e29521f1b48b8d56bf707f25f322c04eb423e99"}, + {file = "pyzmq-25.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3b7032f55b1ed2cd8c349a89e467dca2338b7765fab82cb64c3504e49adaf51"}, + {file = "pyzmq-25.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:960f98f562ee6a50ecf283bc62479d00f5ee10e9068a21683b9e961cd87c9261"}, + {file = "pyzmq-25.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:835da498b71570d56e5526de4d5b36fa10dd9b8a82e2c405f963afeb51ff5bdc"}, + {file = "pyzmq-25.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:21de2ef6099fa8d6a3c2dc15aaca58e9f9ffdcc7b82a246590aa9564815699d9"}, + {file = "pyzmq-25.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e448a5a294958e915a7e1b664e6fbfcd3814989d381fb068673317f6f3ea3f8"}, + {file = "pyzmq-25.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40d909bdc8a2d64ad260925154712602ee6a0425ae0b08bce78a19adfdc2f05b"}, + {file = "pyzmq-25.0.1-cp37-cp37m-win32.whl", hash = 
"sha256:6ff37f2b818df25c887fd40bb434569db7ff66b35f5dfff6f40cc476aee92e3f"}, + {file = "pyzmq-25.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f66ee27a0221771bbaa2cce456e8ca890569c3d18b08b955eb6420c12516537c"}, + {file = "pyzmq-25.0.1-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:1003bbae89435eadec03b4fa3bb6516dd1529fb09ae5704284f7400cc77009ba"}, + {file = "pyzmq-25.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dde7a65a8bfa88aa1721add504320f8344272542291ce4e7c77993fa32901567"}, + {file = "pyzmq-25.0.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:20b6155429d3b57e9e7bd11f1680985ef8b5b0868f1a64073fb8c01326c7c80c"}, + {file = "pyzmq-25.0.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e37a764cbf91c1ed9a02e4fede79a414284aca2a0b7d92d82a3c7b82d678ec2d"}, + {file = "pyzmq-25.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa56a362066b3a853a64d35693a08046f640961efcc0e7643768916403e72e70"}, + {file = "pyzmq-25.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c4bdf1241886d39d816535d3ef9fc325bbf02470c9fd5f2cb62706eeb834f7f2"}, + {file = "pyzmq-25.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:446acbac24427ef42bff61a807ddcad8d03df78fb976184a4d7d6f4b1e7d8a67"}, + {file = "pyzmq-25.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b39847501d229e5fab155d88a565edfb182cdd3f7046f15a7f2df9c77cdc422d"}, + {file = "pyzmq-25.0.1-cp38-cp38-win32.whl", hash = "sha256:cba6b81b653d789d76e438c2e77b49f610b23e84b3bb43b99100f08a0a5d637b"}, + {file = "pyzmq-25.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:6eca6b90c4fb290efd27582780b5eaf048887a32b2c5fcd6330819192cb07b38"}, + {file = "pyzmq-25.0.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:58207a6709e53b723105bac6bb3c6795ee134f7e71351f39c09d52ac235c6b0d"}, + {file = "pyzmq-25.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c62084f37682e7ee4064e8310078be4f6f7687bf528ae5761e2ba7216c5b8949"}, + {file = "pyzmq-25.0.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9c44e9f04f8ed99c6f2e9e49f29d400d7557dd9e9e3f64e1e8a595aedc4258a2"}, + {file = "pyzmq-25.0.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c635d1c40d341835066931a018e378428dfbe0347ed4bb45a6b57f7d8c34196e"}, + {file = "pyzmq-25.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef93b5574c9ff36b4be376555efd369bd55b99bcc7be72f23bd38102dd9392b"}, + {file = "pyzmq-25.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44bc81099ab33388f6c061c1b194307d877428cb2b18282d0385584d5c73ed72"}, + {file = "pyzmq-25.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6d988844ed6caa21b0076b64671e83a136d93c57f1ae5a72b915661af55d313b"}, + {file = "pyzmq-25.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9d5eb6e88ae8a8734f239ffe1ed90559a426cf5b859b8ee66e0cd43fc5daf5c9"}, + {file = "pyzmq-25.0.1-cp39-cp39-win32.whl", hash = "sha256:f6b45db9de4c8adbf5fda58e827a32315d282cfb01e54dc74e7c7ccc0988c010"}, + {file = "pyzmq-25.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:47eeb94b78aa442568b85ad28f85bd37a9c3c34d052cbf8ebf8622c45f23a9cd"}, + {file = "pyzmq-25.0.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0ed7475f3adf0c7750d75740b3267947b501a33f4625ceae709fda2e75ec9ed7"}, + {file = "pyzmq-25.0.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6d09c22ed4d0afcc662d17c2429a03fc1fae7fe7e3bc1f413e744bccfeaabdc3"}, + {file = 
"pyzmq-25.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:703ec5f5a8369c09d8f3eb626358bdb590a2b1375bcce8b7da01b3a03f8b8668"}, + {file = "pyzmq-25.0.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aea31cc0d1f6c3fb4685db08b4c771545cf3fed3c4b4c8942c0a4e97042ec8"}, + {file = "pyzmq-25.0.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:b1c03b942557bb366fd3dc377a15763d5d688de1328228136c75e50f968333cc"}, + {file = "pyzmq-25.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4e8a5ced9d92837f52ccdae6351c627b5012669727bc3eede2dc0f581eca1d0e"}, + {file = "pyzmq-25.0.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d78f840d88244272fb7252e47522b1179833aff7ec64583bda3d21259c9c2c20"}, + {file = "pyzmq-25.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c3f78fa80780e24d294f9421123cb3bd3b68677953c53da85273a22d1c983298"}, + {file = "pyzmq-25.0.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f6de4305e02560111a5d4555758faa85d44a5bff70cccff58dbf30c81a079f0"}, + {file = "pyzmq-25.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:34a1b1a8ce9b20e01aba71b9279d9b1d4e5980a6a4e42092180e16628a444ca1"}, + {file = "pyzmq-25.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:625759a0112af7c3fb560de5724d749729f00b901f7625d1a3f3fb38897544b1"}, + {file = "pyzmq-25.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cff159b21438c24476a49865f3d5700c9cc5833600661bc0e672decec2ff357"}, + {file = "pyzmq-25.0.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cc47652d990de9ef967c494c526d73920ef064fef0444355a7cebec6fc50542"}, + {file = "pyzmq-25.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44db5162a6881f7d740dec65917f38f9bfbc5ad9a10e06d7d5deebb27eb63939"}, + {file = "pyzmq-25.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f38bf2c60a3f7b87cf5177043eb7a331a4f53bc9305a2452decbd42ad0c98741"}, + {file = "pyzmq-25.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b1cf4becd15669bc62a41c1b1bb742e22ac25965134e4254cde82a4dc2554b1b"}, + {file = "pyzmq-25.0.1.tar.gz", hash = "sha256:44a24f7ce44e70d20e2a4c9ba5af70b4611df7a4b920eed2c8e0bdd5a5af225f"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "requests" +version = "2.28.2" +description = "Python HTTP for Humans." 
+category = "dev" +optional = false +python-versions = ">=3.7, <4" +files = [ + {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, + {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "scipy" +version = "1.10.1" +description = "Fundamental algorithms for scientific computing in Python" +category = "main" +optional = false +python-versions = "<3.12,>=3.8" +files = [ + {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, + {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, + {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, + {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, + {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, + {file = 
"scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, + {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, + {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, +] + +[package.dependencies] +numpy = ">=1.19.5,<1.27.0" + +[package.extras] +dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "setuptools" +version = "67.6.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"}, + {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "soupsieve" +version = "2.4" +description = "A modern CSS selector implementation for Beautiful Soup." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"}, + {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"}, +] + +[[package]] +name = "sphinx" +version = "6.1.3" +description = "Python documentation generator" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "Sphinx-6.1.3.tar.gz", hash = "sha256:0dac3b698538ffef41716cf97ba26c1c7788dba73ce6f150c1ff5b4720786dd2"}, + {file = "sphinx-6.1.3-py3-none-any.whl", hash = "sha256:807d1cb3d6be87eb78a381c3e70ebd8d346b9a25f3753e9947e866b2786865fc"}, +] + +[package.dependencies] +alabaster = ">=0.7,<0.8" +babel = ">=2.9" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +docutils = ">=0.18,<0.20" +imagesize = ">=1.3" +importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} +Jinja2 = ">=3.0" +packaging = ">=21.0" +Pygments = ">=2.13" +requests = ">=2.25.0" +snowballstemmer = ">=2.0" +sphinxcontrib-applehelp = "*" +sphinxcontrib-devhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" +sphinxcontrib-jsmath = "*" +sphinxcontrib-qthelp = "*" +sphinxcontrib-serializinghtml = ">=1.1.5" + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] +test = ["cython", "html5lib", "pytest (>=4.6)"] + +[[package]] +name = "sphinx-rtd-theme" +version = "1.2.0" +description = "Read the Docs theme for Sphinx" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "sphinx_rtd_theme-1.2.0-py2.py3-none-any.whl", hash = "sha256:f823f7e71890abe0ac6aaa6013361ea2696fc8d3e1fa798f463e82bdb77eeff2"}, + {file = "sphinx_rtd_theme-1.2.0.tar.gz", hash = "sha256:a0d8bd1a2ed52e0b338cbe19c4b2eef3c5e7a048769753dac6a9f059c7b641b8"}, +] + +[package.dependencies] +docutils = "<0.19" +sphinx = ">=1.6,<7" +sphinxcontrib-jquery = {version = ">=2.0.0,<3.0.0 || >3.0.0", markers = "python_version > \"3\""} + +[package.extras] +dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "1.0.4" +description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, + {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, +] + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "1.0.2" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
+category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, + {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, +] + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.0.1" +description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, + {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, +] + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["html5lib", "pytest"] + +[[package]] +name = "sphinxcontrib-jquery" +version = "2.0.0" +description = "Extension to include jQuery on newer Sphinx releases" +category = "dev" +optional = false +python-versions = ">=2.7" +files = [ + {file = "sphinxcontrib-jquery-2.0.0.tar.gz", hash = "sha256:8fb65f6dba84bf7bcd1aea1f02ab3955ac34611d838bcc95d4983b805b234daa"}, + {file = "sphinxcontrib_jquery-2.0.0-py3-none-any.whl", hash = "sha256:ed47fa425c338ffebe3c37e1cdb56e30eb806116b85f01055b158c7057fdb995"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +description = "A sphinx extension which renders display math in HTML via JavaScript" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] + +[package.extras] +test = ["flake8", "mypy", "pytest"] + +[[package]] +name = "sphinxcontrib-napoleon" +version = "0.7" +description = "Sphinx \"napoleon\" extension." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "sphinxcontrib-napoleon-0.7.tar.gz", hash = "sha256:407382beed396e9f2d7f3043fad6afda95719204a1e1a231ac865f40abcbfcf8"}, + {file = "sphinxcontrib_napoleon-0.7-py2.py3-none-any.whl", hash = "sha256:711e41a3974bdf110a484aec4c1a556799eb0b3f3b897521a018ad7e2db13fef"}, +] + +[package.dependencies] +pockets = ">=0.3" +six = ">=1.5.2" + +[[package]] +name = "sphinxcontrib-qthelp" +version = "1.0.3" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, + {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, +] + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "1.1.5" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
+category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, + {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, +] + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "stack-data" +version = "0.6.2" +description = "Extract data from python stack frames and tracebacks for informative displays" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"}, + {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "tinycss2" +version = "1.2.1" +description = "A tiny CSS parser" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, + {file = "tinycss2-1.2.1.tar.gz", hash = "sha256:8cff3a8f066c2ec677c06dbc7b45619804a6938478d9d73c284b29d14ecb0627"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["flake8", "isort", "pytest"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tornado" +version = "6.2" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+category = "dev" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, + {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"}, + {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"}, + {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"}, + {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"}, +] + +[[package]] +name = "traitlets" +version = "5.9.0" +description = "Traitlets Python configuration system" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, + {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] + +[[package]] +name = "typing-extensions" +version = "4.5.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, + {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, +] + +[[package]] +name = "urllib3" +version = "1.26.15" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, + {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "wcwidth" +version = "0.2.6" +description = "Measures the displayed width of unicode strings in a terminal" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, + {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "zipp" +version = "3.15.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.8.1, <3.12" +content-hash = "6db74104fe03ad8e15db13092dc3255c6d48e0b731673b802be7f7f26258e021" diff --git a/pygam/__init__.py b/pygam/__init__.py index 912d3fd8..74dba53a 100644 --- a/pygam/__init__.py +++ b/pygam/__init__.py @@ -18,7 +18,19 @@ from pygam.terms import te from pygam.terms import intercept -__all__ = ['GAM', 'LinearGAM', 'LogisticGAM', 'GammaGAM', 'PoissonGAM', - 'InvGaussGAM', 'ExpectileGAM', 'l', 's', 'f', 'te', 'intercept'] +__all__ = [ + 'GAM', + 'LinearGAM', + 'LogisticGAM', + 'GammaGAM', + 'PoissonGAM', + 'InvGaussGAM', + 'ExpectileGAM', + 'l', + 's', + 'f', + 'te', + 'intercept', +] __version__ = '0.8.0' diff --git a/pygam/callbacks.py b/pygam/callbacks.py index ebe2f000..7890f040 100644 --- a/pygam/callbacks.py +++ b/pygam/callbacks.py @@ -23,6 +23,7 @@ def validate_callback_data(method): ------- validated callable """ + @wraps(method) def method_wrapper(*args, **kwargs): """ @@ -41,7 +42,7 @@ def method_wrapper(*args, **kwargs): # rename curret gam object if 'self' in kwargs: gam = kwargs['self'] - del(kwargs['self']) + del kwargs['self'] kwargs['gam'] = gam # 
loop once to check any missing @@ -51,8 +52,9 @@ def method_wrapper(*args, **kwargs): continue if e not in kwargs: missing.append(e) - assert len(missing) == 0, 'CallBack cannot reference: {}'.\ - format(', '.join(missing)) + assert len(missing) == 0, 'CallBack cannot reference: {}'.format( + ', '.join(missing) + ) # loop again to extract desired kwargs_subset = {} @@ -65,6 +67,7 @@ def method_wrapper(*args, **kwargs): return method_wrapper + def validate_callback(callback): """ validates a callback's on_loop_start and on_loop_end methods @@ -77,22 +80,27 @@ def validate_callback(callback): ------- validated callback """ - if not(hasattr(callback, '_validated')) or callback._validated == False: - assert hasattr(callback, 'on_loop_start') \ - or hasattr(callback, 'on_loop_end'), \ - 'callback must have `on_loop_start` or `on_loop_end` method' + if not (hasattr(callback, '_validated')) or callback._validated is False: + assert hasattr(callback, 'on_loop_start') or hasattr( + callback, 'on_loop_end' + ), 'callback must have `on_loop_start` or `on_loop_end` method' if hasattr(callback, 'on_loop_start'): - setattr(callback, 'on_loop_start', - validate_callback_data(callback.on_loop_start)) + setattr( + callback, + 'on_loop_start', + validate_callback_data(callback.on_loop_start), + ) if hasattr(callback, 'on_loop_end'): - setattr(callback, 'on_loop_end', - validate_callback_data(callback.on_loop_end)) + setattr( + callback, 'on_loop_end', validate_callback_data(callback.on_loop_end) + ) setattr(callback, '_validated', True) return callback class CallBack(Core): """CallBack class""" + def __init__(self, name=None): """ creates a CallBack instance @@ -111,6 +119,7 @@ def __init__(self, name=None): @validate_callback class Deviance(CallBack): """Deviance CallBack class""" + def __init__(self): """ creates a Deviance CallBack instance @@ -181,7 +190,7 @@ def on_loop_start(self, y, mu): ------- accuracy : np.array of length n """ - return np.mean(y == (mu>0.5)) + return np.mean(y == (mu > 0.5)) @validate_callback @@ -217,6 +226,7 @@ def on_loop_end(self, diff): """ return diff + @validate_callback class Coef(CallBack): def __init__(self): @@ -250,8 +260,4 @@ def on_loop_start(self, gam): return gam.coef_ -CALLBACKS = {'deviance': Deviance, - 'diffs': Diffs, - 'accuracy': Accuracy, - 'coef': Coef - } +CALLBACKS = {'deviance': Deviance, 'diffs': Diffs, 'accuracy': Accuracy, 'coef': Coef} diff --git a/pygam/core.py b/pygam/core.py index ef26cfa7..dd7fc277 100644 --- a/pygam/core.py +++ b/pygam/core.py @@ -8,7 +8,16 @@ from pygam.utils import round_to_n_decimal_places, flatten -def nice_repr(name, param_kvs, line_width=30, line_offset=5, decimals=3, args=None, flatten_attrs=True): + +def nice_repr( + name, + param_kvs, + line_width=30, + line_offset=5, + decimals=3, + args=None, + flatten_attrs=True, +): """ tool to do a nice repr of a class. 
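Aside: a minimal sketch, not part of the patch, of how the reformatted `nice_repr` signature above is exercised; the class name `'ToyGAM'` and the parameter values are hypothetical, chosen only to show the wrapping behavior controlled by `line_width` and `line_offset`.

    # Hypothetical usage of pygam.core.nice_repr (values are made up):
    from pygam.core import nice_repr

    out = nice_repr(
        'ToyGAM',                            # class name to print
        {'max_iter': 100, 'n_splines': 25},  # param name -> value pairs
        line_width=30,                       # wrap once a line would exceed this
        line_offset=5,                       # indent for continuation lines
    )
    print(out)  # something like "ToyGAM(n_splines=25," with "max_iter=100)"
                # wrapped onto a continuation line indented by line_offset spaces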
@@ -34,7 +43,7 @@ class name out : str nicely formatted repr of class instance """ - if not param_kvs and not args : + if not param_kvs and not args: # if the object has no params it's easy return '{}()'.format(name) @@ -42,7 +51,7 @@ class name ks = list(param_kvs.keys()) vs = list(param_kvs.values()) idxs = np.argsort(ks) - param_kvs = [(ks[i],vs[i]) for i in idxs] + param_kvs = [(ks[i], vs[i]) for i in idxs] if args is not None: param_kvs = [(None, arg) for arg in args] + param_kvs @@ -51,7 +60,6 @@ class name out = '' current_line = name + '(' while len(param_kvs) > 0: - # flatten sub-term properties, but not `terms` k, v = param_kvs.pop() if flatten_attrs and k != 'terms': @@ -75,12 +83,12 @@ class name current_line += param else: out += current_line + '\n' - current_line = ' '*line_offset + param + current_line = ' ' * line_offset + param if len(current_line) < line_width and len(param_kvs) > 0: current_line += ' ' - out += current_line[:-1] # remove trailing comma + out += current_line[:-1] # remove trailing comma out += ')' return out @@ -114,7 +122,6 @@ def __init__(self, name=None, line_width=70, line_offset=3): if not hasattr(self, '_include'): self._include = [] - def __str__(self): """__str__ method""" if self._name is None: @@ -124,10 +131,14 @@ def __str__(self): def __repr__(self): """__repr__ method""" name = self.__class__.__name__ - return nice_repr(name, self.get_params(), - line_width=self._line_width, - line_offset=self._line_offset, - decimals=4, args=None) + return nice_repr( + name, + self.get_params(), + line_width=self._line_width, + line_offset=self._line_offset, + decimals=4, + args=None, + ) def get_params(self, deep=False): """ @@ -148,10 +159,13 @@ def get_params(self, deep=False): if deep is True: return attrs - return dict([(k,v) for k,v in list(attrs.items()) \ - if (k[0] != '_') \ - and (k[-1] != '_') \ - and (k not in self._exclude)]) + return dict( + [ + (k, v) + for k, v in list(attrs.items()) + if (k[0] != '_') and (k[-1] != '_') and (k not in self._exclude) + ] + ) def set_params(self, deep=False, force=False, **parameters): """ @@ -172,8 +186,10 @@ def set_params(self, deep=False, force=False, **parameters): """ param_names = self.get_params(deep=deep).keys() for parameter, value in parameters.items(): - if (parameter in param_names + if ( + parameter in param_names or force - or (hasattr(self, parameter) and parameter == parameter.strip('_'))): + or (hasattr(self, parameter) and parameter == parameter.strip('_')) + ): setattr(self, parameter, value) return self diff --git a/pygam/datasets/__init__.py b/pygam/datasets/__init__.py index 6650b25b..84204df7 100644 --- a/pygam/datasets/__init__.py +++ b/pygam/datasets/__init__.py @@ -16,15 +16,17 @@ from pygam.datasets.load_datasets import chicago from pygam.datasets.load_datasets import toy_interaction -__all__ = ['mcycle', - 'coal', - 'faithful', - 'trees', - 'wage', - 'default', - 'cake', - 'hepatitis', - 'toy_classification', - 'head_circumference', - 'chicago', - 'toy_interaction'] +__all__ = [ + 'mcycle', + 'coal', + 'faithful', + 'trees', + 'wage', + 'default', + 'cake', + 'hepatitis', + 'toy_classification', + 'head_circumference', + 'chicago', + 'toy_interaction', +] diff --git a/pygam/datasets/load_datasets.py b/pygam/datasets/load_datasets.py index 2b2f30b3..69610654 100644 --- a/pygam/datasets/load_datasets.py +++ b/pygam/datasets/load_datasets.py @@ -15,10 +15,10 @@ def _clean_X_y(X, y): - """ensure that X and y data are float and correct shapes - """ + """ensure that X and y data are 
float and correct shapes""" return make_2d(X, verbose=False).astype('float'), y.astype('float') + def mcycle(return_X_y=True): """motorcyle acceleration dataset @@ -51,6 +51,7 @@ def mcycle(return_X_y=True): return _clean_X_y(X, y) return motor + def coal(return_X_y=True): """coal-mining accidents dataset @@ -83,10 +84,11 @@ def coal(return_X_y=True): coal = pd.read_csv(PATH + '/coal.csv', index_col=0) if return_X_y: y, x = np.histogram(coal.values, bins=150) - X = x[:-1] + np.diff(x)/2 # get midpoints of bins + X = x[:-1] + np.diff(x) / 2 # get midpoints of bins return _clean_X_y(X, y) return coal + def faithful(return_X_y=True): """old-faithful dataset @@ -106,7 +108,8 @@ def faithful(return_X_y=True): ----- The (X, y) tuple is a processed version of the otherwise raw DataFrame. - A histogram of 200 bins has been computed describing the wating time between eruptions. + A histogram of 200 bins has been computed describing the waiting time + between eruptions. X contains the midpoints of histogram bins. y contains the count in each histogram bin. @@ -119,10 +122,11 @@ def faithful(return_X_y=True): faithful = pd.read_csv(PATH + '/faithful.csv', index_col=0) if return_X_y: y, x = np.histogram(faithful['eruptions'], bins=200) - X = x[:-1] + np.diff(x)/2 # get midpoints of bins + X = x[:-1] + np.diff(x) / 2 # get midpoints of bins return _clean_X_y(X, y) return faithful + def wage(return_X_y=True): """wage dataset @@ -153,11 +157,12 @@ def wage(return_X_y=True): wage = pd.read_csv(PATH + '/wage.csv', index_col=0) if return_X_y: X = wage[['year', 'age', 'education']].values - X[:,-1] = np.unique(X[:,-1], return_inverse=True)[1] + X[:, -1] = np.unique(X[:, -1], return_inverse=True)[1] y = wage['wage'].values return _clean_X_y(X, y) return wage + def trees(return_X_y=True): """cherry trees dataset @@ -190,6 +195,7 @@ def trees(return_X_y=True): return _clean_X_y(X, y) return trees + def default(return_X_y=True): """credit default dataset @@ -220,13 +226,14 @@ def default(return_X_y=True): default = pd.read_csv(PATH + '/default.csv', index_col=0) if return_X_y: default = default.values - default[:,0] = np.unique(default[:,0], return_inverse=True)[1] - default[:,1] = np.unique(default[:,1], return_inverse=True)[1] - X = default[:,1:] - y = default[:,0] + default[:, 0] = np.unique(default[:, 0], return_inverse=True)[1] + default[:, 1] = np.unique(default[:, 1], return_inverse=True)[1] + X = default[:, 1:] + y = default[:, 0] return _clean_X_y(X, y) return default + def cake(return_X_y=True): """cake dataset @@ -257,12 +264,13 @@ def cake(return_X_y=True): cake = pd.read_csv(PATH + '/cake.csv', index_col=0) if return_X_y: X = cake[['recipe', 'replicate', 'temperature']].values - X[:,0] = np.unique(cake.values[:,1], return_inverse=True)[1] - X[:,1] -= 1 + X[:, 0] = np.unique(cake.values[:, 1], return_inverse=True)[1] + X[:, 1] -= 1 y = cake['angle'].values return _clean_X_y(X, y) return cake + def hepatitis(return_X_y=True): """hepatitis in Bulgaria dataset @@ -303,6 +311,7 @@ def hepatitis(return_X_y=True): return _clean_X_y(X, y) return hep + def toy_classification(return_X_y=True, n=5000): """toy classification dataset with irrelevant features @@ -339,26 +348,35 @@ def toy_classification(return_X_y=True, n=5000): Also, this dataset is randomly generated and will vary each time. 
""" # make features - X = np.random.rand(n,5) * 10 - 5 - cat = np.random.randint(0,4, n) + X = np.random.rand(n, 5) * 10 - 5 + cat = np.random.randint(0, 4, n) X = np.c_[X, cat] # make observations - log_odds = (-0.5*X[:,0]**2) + 5 +(-0.5*X[:,1]**2) + np.mod(X[:,-1], 2)*-30 - p = 1/(1+np.exp(-log_odds)).squeeze() - y = (np.random.rand(n) < p).astype(np.int) + log_odds = ( + (-0.5 * X[:, 0] ** 2) + 5 + (-0.5 * X[:, 1] ** 2) + np.mod(X[:, -1], 2) * -30 + ) + p = 1 / (1 + np.exp(-log_odds)).squeeze() + y = (np.random.rand(n) < p).astype(int) if return_X_y: return X, y else: - return pd.DataFrame(np.c_[X, y], columns=[['continuous0', - 'continuous1', - 'irrelevant0', - 'irrelevant1', - 'irrelevant2', - 'categorical0', - 'observations' - ]]) + return pd.DataFrame( + np.c_[X, y], + columns=[ + [ + 'continuous0', + 'continuous1', + 'irrelevant0', + 'irrelevant1', + 'irrelevant2', + 'categorical0', + 'observations', + ] + ], + ) + def head_circumference(return_X_y=True): """head circumference for dutch boys @@ -390,6 +408,7 @@ def head_circumference(return_X_y=True): return _clean_X_y(X, y) return head + def chicago(return_X_y=True): """Chicago air pollution and death rate data @@ -441,6 +460,7 @@ def chicago(return_X_y=True): else: return chi + def toy_interaction(return_X_y=True, n=50000, stddev=0.1): """a sinusoid modulated by a linear function @@ -478,10 +498,10 @@ def toy_interaction(return_X_y=True, n=50000, stddev=0.1): Source: """ - X = np.random.uniform(-1,1, size=(n, 2)) + X = np.random.uniform(-1, 1, size=(n, 2)) X[:, 1] *= 5 - y = np.sin(X[:,0] * 2 * np.pi * 1.5) * X[:,1] + y = np.sin(X[:, 0] * 2 * np.pi * 1.5) * X[:, 1] y += np.random.randn(len(X)) * stddev if return_X_y: diff --git a/pygam/distributions.py b/pygam/distributions.py index 25eabc6f..18e95446 100644 --- a/pygam/distributions.py +++ b/pygam/distributions.py @@ -20,6 +20,7 @@ def multiplied(self, y, mu, weights=None, **kwargs): if weights is None: weights = np.ones_like(mu) return deviance(self, y, mu, **kwargs) * weights + return multiplied @@ -29,6 +30,7 @@ def divided(self, mu, weights=None, **kwargs): if weights is None: weights = np.ones_like(mu) return V(self, mu, **kwargs) / weights + return divided @@ -83,8 +85,7 @@ def phi(self, y, mu, edof, weights): if self._known_scale: return self.scale else: - return (np.sum(weights * self.V(mu)**-1 * (y - mu)**2) / - (len(mu) - edof)) + return np.sum(weights * self.V(mu) ** -1 * (y - mu) ** 2) / (len(mu) - edof) @abstractmethod def sample(self, mu): @@ -195,7 +196,7 @@ def deviance(self, y, mu, scaled=True): ------- deviances : np.array of length n """ - dev = (y - mu)**2 + dev = (y - mu) ** 2 if scaled: dev /= self.scale return dev @@ -242,7 +243,7 @@ def __init__(self, levels=1): if levels is None: levels = 1 self.levels = levels - super(BinomialDist, self).__init__(name='binomial', scale=1.) + super(BinomialDist, self).__init__(name='binomial', scale=1.0) self._exclude.append('scale') def log_pdf(self, y, mu, weights=None): @@ -328,8 +329,7 @@ def sample(self, mu): """ number_of_trials = self.levels success_probability = mu / number_of_trials - return np.random.binomial(n=number_of_trials, p=success_probability, - size=None) + return np.random.binomial(n=number_of_trials, p=success_probability, size=None) class PoissonDist(Distribution): @@ -349,7 +349,7 @@ def __init__(self): ------- self """ - super(PoissonDist, self).__init__(name='poisson', scale=1.) 
+ super(PoissonDist, self).__init__(name='poisson', scale=1.0) self._exclude.append('scale') def log_pdf(self, y, mu, weights=None): @@ -547,7 +547,7 @@ def sample(self, mu): """ # in numpy.random.gamma, `shape` is the parameter sometimes denoted by # `k` that corresponds to `nu` in S. Wood (2006) Table 2.1 - shape = 1. / self.scale + shape = 1.0 / self.scale # in numpy.random.gamma, `scale` is the parameter sometimes denoted by # `theta` that corresponds to mu / nu in S. Wood (2006) Table 2.1 scale = mu / shape @@ -595,7 +595,7 @@ def log_pdf(self, y, mu, weights=None): if weights is None: weights = np.ones_like(mu) gamma = weights / self.scale - return sp.stats.invgauss.logpdf(y, mu, scale=1./gamma) + return sp.stats.invgauss.logpdf(y, mu, scale=1.0 / gamma) @divide_weights def V(self, mu): @@ -636,7 +636,7 @@ def deviance(self, y, mu, scaled=True): ------- deviances : np.array of length n """ - dev = ((y - mu)**2) / (mu**2 * y) + dev = ((y - mu) ** 2) / (mu**2 * y) if scaled: dev /= self.scale @@ -658,9 +658,10 @@ def sample(self, mu): return np.random.wald(mean=mu, scale=self.scale, size=None) -DISTRIBUTIONS = {'normal': NormalDist, - 'poisson': PoissonDist, - 'binomial': BinomialDist, - 'gamma': GammaDist, - 'inv_gauss': InvGaussDist - } +DISTRIBUTIONS = { + 'normal': NormalDist, + 'poisson': PoissonDist, + 'binomial': BinomialDist, + 'gamma': GammaDist, + 'inv_gauss': InvGaussDist, +} diff --git a/pygam/links.py b/pygam/links.py index 27179bc5..6f429939 100644 --- a/pygam/links.py +++ b/pygam/links.py @@ -23,6 +23,7 @@ def __init__(self, name=None): """ super(Link, self).__init__(name=name) + class IdentityLink(Link): def __init__(self): """ @@ -85,6 +86,7 @@ def gradient(self, mu, dist): """ return np.ones_like(mu) + class LogitLink(Link): def __init__(self): """ @@ -146,7 +148,8 @@ def gradient(self, mu, dist): ------- grad : np.array of length n """ - return dist.levels/(mu*(dist.levels - mu)) + return dist.levels / (mu * (dist.levels - mu)) + class LogLink(Link): def __init__(self): @@ -208,7 +211,8 @@ def gradient(self, mu, dist): ------- grad : np.array of length n """ - return 1. / mu + return 1.0 / mu + class InverseLink(Link): def __init__(self): @@ -239,7 +243,7 @@ def link(self, mu, dist): ------- lp : np.array of length n """ - return mu ** -1. + return mu**-1.0 def mu(self, lp, dist): """ @@ -255,7 +259,7 @@ def mu(self, lp, dist): ------- mu : np.array of length n """ - return lp ** -1. + return lp**-1.0 def gradient(self, mu, dist): """ @@ -270,7 +274,8 @@ def gradient(self, mu, dist): ------- grad : np.array of length n """ - return -1 * mu**-2. + return -1 * mu**-2.0 + class InvSquaredLink(Link): def __init__(self): @@ -301,7 +306,7 @@ def link(self, mu, dist): ------- lp : np.array of length n """ - return mu ** -2. + return mu**-2.0 def mu(self, lp, dist): """ @@ -317,7 +322,7 @@ def mu(self, lp, dist): ------- mu : np.array of length n """ - return lp ** -0.5 + return lp**-0.5 def gradient(self, mu, dist): """ @@ -332,12 +337,13 @@ def gradient(self, mu, dist): ------- grad : np.array of length n """ - return -2 * mu**-3. 
+ return -2 * mu**-3.0 -LINKS = {'identity': IdentityLink, - 'log': LogLink, - 'logit': LogitLink, - 'inverse': InverseLink, - 'inv_squared': InvSquaredLink - } +LINKS = { + 'identity': IdentityLink, + 'log': LogLink, + 'logit': LogitLink, + 'inverse': InverseLink, + 'inv_squared': InvSquaredLink, +} diff --git a/pygam/penalties.py b/pygam/penalties.py index aee1d2a0..2ca239c9 100644 --- a/pygam/penalties.py +++ b/pygam/penalties.py @@ -1,6 +1,7 @@ """ Penalty matrix generators """ +import warnings import scipy as sp import numpy as np @@ -30,25 +31,29 @@ def derivative(n, coef, derivative=2, periodic=False): """ if n == 1: # no derivative for constant functions - return sp.sparse.csc_matrix(0.) - D = sparse_diff(sp.sparse.identity(n + 2*derivative*periodic).tocsc(), n=derivative).tolil() + return sp.sparse.csc_matrix(0.0) + D = sparse_diff( + sp.sparse.identity(n + 2 * derivative * periodic).tocsc(), n=derivative + ).tolil() if periodic: # wrap penalty cols = D[:, :derivative] - D[:, -2 * derivative:-derivative] += cols * (-1) ** derivative + D[:, -2 * derivative : -derivative] += cols * (-1) ** derivative # do symmetric operation on lower half of matrix - n_rows = int((n + 2 * derivative)/2) + n_rows = int((n + 2 * derivative) / 2) D[-n_rows:] = D[:n_rows][::-1, ::-1] # keep only the center of the augmented matrix D = D[derivative:-derivative, derivative:-derivative] return D.dot(D.T).tocsc() + def periodic(n, coef, derivative=2, _penalty=derivative): return _penalty(n, coef, derivative=derivative, periodic=True) + def l2(n, coef): """ Builds a penalty matrix for P-Splines with categorical features. @@ -68,6 +73,7 @@ def l2(n, coef): """ return sp.sparse.eye(n).tocsc() + def monotonicity_(n, coef, increasing=True): """ Builds a penalty matrix for P-Splines with continuous features. @@ -86,13 +92,14 @@ def monotonicity_(n, coef, increasing=True): penalty matrix : sparse csc matrix of shape (n,n) """ if n != len(coef.ravel()): - raise ValueError('dimension mismatch: expected n equals len(coef), '\ - 'but found n = {}, coef.shape = {}.'\ - .format(n, coef.shape)) + raise ValueError( + 'dimension mismatch: expected n equals len(coef), ' + 'but found n = {}, coef.shape = {}.'.format(n, coef.shape) + ) - if n==1: + if n == 1: # no monotonic penalty for constant functions - return sp.sparse.csc_matrix(0.) + return sp.sparse.csc_matrix(0.0) if increasing: # only penalize the case where coef_i-1 > coef_i @@ -105,6 +112,7 @@ def monotonicity_(n, coef, increasing=True): D = sparse_diff(sp.sparse.identity(n).tocsc(), n=derivative) * mask return D.dot(D.T).tocsc() + def monotonic_inc(n, coef): """ Builds a penalty matrix for P-Splines with continuous features. @@ -122,6 +130,7 @@ def monotonic_inc(n, coef): """ return monotonicity_(n, coef, increasing=True) + def monotonic_dec(n, coef): """ Builds a penalty matrix for P-Splines with continuous features. @@ -140,6 +149,7 @@ def monotonic_dec(n, coef): """ return monotonicity_(n, coef, increasing=False) + def convexity_(n, coef, convex=True): """ Builds a penalty matrix for P-Splines with continuous features. 
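Aside: a small self-contained sketch, not part of the patch, of the quadratic penalty that `derivative(n, coef)` above assembles for the default `derivative=2`. It builds a second-difference operator (equivalent to applying `sparse_diff` twice to an identity matrix, up to transposition) and confirms that a linear coefficient vector incurs zero penalty, which is why P-splines shrink toward straight lines rather than toward zero.

    import numpy as np
    import scipy.sparse as sp

    n = 5
    eye = sp.identity(n).tocsc()
    D1 = eye[1:] - eye[:-1]  # first-difference operator, shape (n-1, n)
    D2 = D1[1:] - D1[:-1]    # second-difference operator, shape (n-2, n)
    P = D2.T.dot(D2)         # penalty in quadratic form, shape (n, n)

    coef = np.arange(n, dtype=float)  # a straight line has zero 2nd differences
    print(coef @ P.toarray() @ coef)  # -> 0.0, so linear trends go unpenalized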
@@ -158,13 +168,14 @@ def convexity_(n, coef, convex=True): penalty matrix : sparse csc matrix of shape (n,n) """ if n != len(coef.ravel()): - raise ValueError('dimension mismatch: expected n equals len(coef), '\ - 'but found n = {}, coef.shape = {}.'\ - .format(n, coef.shape)) + raise ValueError( + 'dimension mismatch: expected n equals len(coef), ' + 'but found n = {}, coef.shape = {}.'.format(n, coef.shape) + ) - if n==1: + if n == 1: # no convex penalty for constant functions - return sp.sparse.csc_matrix(0.) + return sp.sparse.csc_matrix(0.0) if convex: mask = sp.sparse.diags((np.diff(coef.ravel(), n=2) < 0).astype(float)) @@ -175,6 +186,7 @@ def convexity_(n, coef, convex=True): D = sparse_diff(sp.sparse.identity(n).tocsc(), n=derivative) * mask return D.dot(D.T).tocsc() + def convex(n, coef): """ Builds a penalty matrix for P-Splines with continuous features. @@ -193,6 +205,7 @@ def convex(n, coef): """ return convexity_(n, coef, convex=True) + def concave(n, coef): """ Builds a penalty matrix for P-Splines with continuous features. @@ -211,6 +224,7 @@ def concave(n, coef): """ return convexity_(n, coef, convex=False) + # def circular(n, coef): # """ # Builds a penalty matrix for P-Splines with continuous features. @@ -242,6 +256,7 @@ def concave(n, coef): # P = sp.sparse.vstack([row, sp.sparse.csc_matrix((n-2, n)), row[::-1]]) # return P.tocsc() + def none(n, coef): """ Build a matrix of zeros for features that should go unpenalized @@ -259,7 +274,8 @@ def none(n, coef): """ return sp.sparse.csc_matrix(np.zeros((n, n))) -def wrap_penalty(p, fit_linear, linear_penalty=0.): + +def wrap_penalty(p, fit_linear, linear_penalty=0.0): """ tool to account for unity penalty on the linear term of any feature. @@ -280,16 +296,18 @@ def wrap_penalty(p, fit_linear, linear_penalty=0.): wrapped_p : callable modified penalty-matrix-generating function """ + def wrapped_p(n, *args): if fit_linear: if n == 1: return sp.sparse.block_diag([linear_penalty], format='csc') - return sp.sparse.block_diag([linear_penalty, - p(n-1, *args)], format='csc') + return sp.sparse.block_diag([linear_penalty, p(n - 1, *args)], format='csc') else: return p(n, *args) + return wrapped_p + def sparse_diff(array, n=1, axis=-1): """ A ported sparse version of np.diff. @@ -310,8 +328,9 @@ def sparse_diff(array, n=1, axis=-1): but 'axis' dimension is smaller by 'n'. """ if (n < 0) or (int(n) != n): - raise ValueError('Expected order is non-negative integer, '\ - 'but found: {}'.format(n)) + raise ValueError( + 'Expected order is non-negative integer, ' 'but found: {}'.format(n) + ) if not sp.sparse.issparse(array): warnings.warn('Array is not sparse. 
Consider using numpy.diff') @@ -319,27 +338,29 @@ def sparse_diff(array, n=1, axis=-1): return array nd = array.ndim - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) - A = sparse_diff(array, n-1, axis=axis) + A = sparse_diff(array, n - 1, axis=axis) return A[slice1] - A[slice2] -PENALTIES = {'auto': 'auto', - 'derivative': derivative, - 'l2': l2, - 'none': none, - 'periodic': periodic - } - -CONSTRAINTS = {'convex': convex, - 'concave': concave, - 'monotonic_inc': monotonic_inc, - 'monotonic_dec': monotonic_dec, - 'none': none - } +PENALTIES = { + 'auto': 'auto', + 'derivative': derivative, + 'l2': l2, + 'none': none, + 'periodic': periodic, +} + +CONSTRAINTS = { + 'convex': convex, + 'concave': concave, + 'monotonic_inc': monotonic_inc, + 'monotonic_dec': monotonic_dec, + 'none': none, +} diff --git a/pygam/pygam.py b/pygam/pygam.py index ae8c8d64..16e5c24b 100644 --- a/pygam/pygam.py +++ b/pygam/pygam.py @@ -9,43 +9,43 @@ import numpy as np import scipy as sp -from scipy import stats +from scipy import stats # noqa: F401 from pygam.core import Core -from pygam.penalties import derivative -from pygam.penalties import l2 -from pygam.penalties import monotonic_inc -from pygam.penalties import monotonic_dec -from pygam.penalties import convex -from pygam.penalties import concave -from pygam.penalties import none -from pygam.penalties import wrap_penalty -from pygam.penalties import PENALTIES, CONSTRAINTS - -from pygam.distributions import Distribution -from pygam.distributions import NormalDist -from pygam.distributions import BinomialDist -from pygam.distributions import PoissonDist -from pygam.distributions import GammaDist -from pygam.distributions import InvGaussDist -from pygam.distributions import DISTRIBUTIONS - -from pygam.links import Link -from pygam.links import IdentityLink -from pygam.links import LogitLink -from pygam.links import LogLink -from pygam.links import InverseLink -from pygam.links import InvSquaredLink -from pygam.links import LINKS - -from pygam.callbacks import CallBack -from pygam.callbacks import Deviance -from pygam.callbacks import Diffs -from pygam.callbacks import Accuracy -from pygam.callbacks import Coef -from pygam.callbacks import validate_callback -from pygam.callbacks import CALLBACKS +from pygam.penalties import derivative # noqa: F401 +from pygam.penalties import l2 # noqa: F401 +from pygam.penalties import monotonic_inc # noqa: F401 +from pygam.penalties import monotonic_dec # noqa: F401 +from pygam.penalties import convex # noqa: F401 +from pygam.penalties import concave # noqa: F401 +from pygam.penalties import none # noqa: F401 +from pygam.penalties import wrap_penalty # noqa: F401 +from pygam.penalties import PENALTIES, CONSTRAINTS # noqa: F401 + +from pygam.distributions import Distribution # noqa: F401 +from pygam.distributions import NormalDist # noqa: F401 +from pygam.distributions import BinomialDist # noqa: F401 +from pygam.distributions import PoissonDist # noqa: F401 +from pygam.distributions import GammaDist # noqa: F401 +from pygam.distributions import InvGaussDist # noqa: F401 +from pygam.distributions import DISTRIBUTIONS # noqa: F401 + +from pygam.links import Link # noqa: F401 +from pygam.links import IdentityLink # noqa: F401 +from pygam.links import LogitLink # noqa: F401 +from pygam.links import LogLink # noqa: F401 +from pygam.links import InverseLink # 
noqa: F401 +from pygam.links import InvSquaredLink # noqa: F401 +from pygam.links import LINKS # noqa: F401 + +from pygam.callbacks import CallBack # noqa: F401 +from pygam.callbacks import Deviance # noqa: F401 +from pygam.callbacks import Diffs # noqa: F401 +from pygam.callbacks import Accuracy # noqa: F401 +from pygam.callbacks import Coef # noqa: F401 +from pygam.callbacks import validate_callback # noqa: F401 +from pygam.callbacks import CALLBACKS # noqa: F401 from pygam.utils import check_y from pygam.utils import check_X @@ -58,7 +58,7 @@ from pygam.utils import TablePrinter from pygam.utils import space_row from pygam.utils import sig_code -from pygam.utils import b_spline_basis +from pygam.utils import b_spline_basis # noqa: F401 from pygam.utils import combine from pygam.utils import cholesky from pygam.utils import check_param @@ -66,17 +66,17 @@ from pygam.utils import NotPositiveDefiniteError from pygam.utils import OptimizationError -from pygam.terms import Term -from pygam.terms import Intercept, intercept -from pygam.terms import LinearTerm, l -from pygam.terms import SplineTerm, s -from pygam.terms import FactorTerm, f -from pygam.terms import TensorTerm, te -from pygam.terms import TermList -from pygam.terms import MetaTermMixin +from pygam.terms import Term # noqa: F401 +from pygam.terms import Intercept, intercept # noqa: F401 +from pygam.terms import LinearTerm, l # noqa: F401 +from pygam.terms import SplineTerm, s # noqa: F401 +from pygam.terms import FactorTerm, f # noqa: F401 +from pygam.terms import TensorTerm, te # noqa: F401 +from pygam.terms import TermList # noqa: F401 +from pygam.terms import MetaTermMixin # noqa: F401 -EPS = np.finfo(np.float64).eps # machine epsilon +EPS = np.finfo(np.float64).eps # machine epsilon class GAM(Core, MetaTermMixin): @@ -147,11 +147,19 @@ class GAM(Core, MetaTermMixin): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - distribution='normal', link='identity', - callbacks=['deviance', 'diffs'], - fit_intercept=True, verbose=False, **kwargs): + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + distribution='normal', + link='identity', + callbacks=['deviance', 'diffs'], + fit_intercept=True, + verbose=False, + **kwargs, + ): self.max_iter = max_iter self.tol = tol self.distribution = distribution @@ -163,15 +171,17 @@ def __init__(self, terms='auto', max_iter=100, tol=1e-4, for k, v in kwargs.items(): if k not in self._plural: - raise TypeError('__init__() got an unexpected keyword argument {}'.format(k)) + raise TypeError( + '__init__() got an unexpected keyword argument {}'.format(k) + ) setattr(self, k, v) # internal settings - self._constraint_lam = 1e9 # regularization intensity for constraints - self._constraint_l2 = 1e-3 # diagononal loading to improve conditioning - self._constraint_l2_max = 1e-1 # maximum loading + self._constraint_lam = 1e9 # regularization intensity for constraints + self._constraint_l2 = 1e-3 # diagonal loading to improve conditioning + self._constraint_l2_max = 1e-1 # maximum loading # self._opt = 0 # use 0 for numerically stable optimizer, 1 for naive - self._term_location = 'terms' # for locating sub terms + self._term_location = 'terms' # for locating sub terms # self._include = ['lam'] # call super and exclude any variables @@ -218,22 +228,34 @@ def _validate_params(self): """ # fit_intercep if not isinstance(self.fit_intercept, 
bool): - raise ValueError('fit_intercept must be type bool, but found {}'\ - .format(self.fit_intercept.__class__)) + raise ValueError( + 'fit_intercept must be type bool, but found {}'.format( + self.fit_intercept.__class__ + ) + ) # terms - if (self.terms != 'auto') and not (isinstance(self.terms, (TermList, Term, type(None)))): - raise ValueError('terms must be a TermList, but found '\ - 'terms = {}'.format(self.terms)) + if (self.terms != 'auto') and not ( + isinstance(self.terms, (TermList, Term, type(None))) + ): + raise ValueError( + 'terms must be a TermList, but found ' 'terms = {}'.format(self.terms) + ) # max_iter - self.max_iter = check_param(self.max_iter, param_name='max_iter', - dtype='int', constraint='>=1', - iterable=False) + self.max_iter = check_param( + self.max_iter, + param_name='max_iter', + dtype='int', + constraint='>=1', + iterable=False, + ) # distribution - if not ((self.distribution in DISTRIBUTIONS) - or isinstance(self.distribution, Distribution)): + if not ( + (self.distribution in DISTRIBUTIONS) + or isinstance(self.distribution, Distribution) + ): raise ValueError('unsupported distribution {}'.format(self.distribution)) if self.distribution in DISTRIBUTIONS: self.distribution = DISTRIBUTIONS[self.distribution]() @@ -246,11 +268,11 @@ def _validate_params(self): # callbacks if not isiterable(self.callbacks): - raise ValueError('Callbacks must be iterable, but found {}'\ - .format(self.callbacks)) + raise ValueError( + 'Callbacks must be iterable, but found {}'.format(self.callbacks) + ) - if not all([c in CALLBACKS or - isinstance(c, CallBack) for c in self.callbacks]): + if not all([c in CALLBACKS or isinstance(c, CallBack) for c in self.callbacks]): raise ValueError('unsupported callback(s) {}'.format(self.callbacks)) callbacks = list(self.callbacks) for i, c in enumerate(self.callbacks): @@ -275,7 +297,9 @@ def _validate_data_dep_params(self, X): # terms if self.terms == 'auto': # one numerical spline per feature - self.terms = TermList(*[SplineTerm(feat, verbose=self.verbose) for feat in range(m_features)]) + self.terms = TermList( + *[SplineTerm(feat, verbose=self.verbose) for feat in range(m_features)] + ) elif self.terms is None: # no terms @@ -326,8 +350,9 @@ def loglikelihood(self, X, y, weights=None): if weights is not None: weights = np.array(weights).astype('f').ravel() - weights = check_array(weights, name='sample weights', - ndim=1, verbose=self.verbose) + weights = check_array( + weights, name='sample weights', ndim=1, verbose=self.verbose + ) check_lengths(y, weights) else: weights = np.ones_like(y).astype('float64') @@ -409,9 +434,14 @@ def predict_mu(self, X): if not self._is_fitted: raise AttributeError('GAM has not been fitted. 
Call fit first.') - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) lp = self._linear_predictor(X) return self.link.mu(lp, self.distribution) @@ -452,9 +482,14 @@ def _modelmat(self, X, term=-1): modelmat : sparse matrix of len n_samples containing model matrix of the spline basis for selected features """ - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) return self.terms.build_columns(X, term=term) @@ -487,15 +522,16 @@ def _cholesky(self, A, **kwargs): return L except NotPositiveDefiniteError: if self.verbose: - warnings.warn('Matrix is not positive definite. \n'\ - 'Increasing l2 reg by factor of 10.', - stacklevel=2) + warnings.warn( + 'Matrix is not positive definite. \n' + 'Increasing l2 reg by factor of 10.', + stacklevel=2, + ) A -= constraint_l2 * diag constraint_l2 *= 10 A += constraint_l2 * diag - raise NotPositiveDefiniteError('Matrix is not positive \n' - 'definite.') + raise NotPositiveDefiniteError('Matrix is not positive \n' 'definite.') def _P(self): """ @@ -535,9 +571,9 @@ def _C(self): ------- C : sparse CSC matrix containing the model constraints in quadratic form """ - return self.terms.build_constraints(self.coef_, - self._constraint_lam, - self._constraint_l2) + return self.terms.build_constraints( + self.coef_, self._constraint_lam, self._constraint_l2 + ) def _pseudo_data(self, y, lp, mu): """ @@ -568,8 +604,8 @@ def _W(self, mu, weights, y=None): this makes me think that they are equivalent. - also, using non-sqrt mu with stable opt gives very small edofs for even lam=0.001 - and the parameter variance is huge. this seems strange to me. + also, using non-sqrt mu with stable opt gives very small edofs for even + lam=0.001 and the parameter variance is huge. this seems strange to me. computed [V * d(link)/d(mu)] ^(-1/2) by hand and the math checks out as hoped. @@ -588,9 +624,14 @@ def _W(self, mu, weights, y=None): ------- weights : sp..sparse array of shape (n_samples, n_samples) """ - return sp.sparse.diags((self.link.gradient(mu, self.distribution)**2 * - self.distribution.V(mu=mu) * - weights ** -1)**-0.5) + return sp.sparse.diags( + ( + self.link.gradient(mu, self.distribution) ** 2 + * self.distribution.V(mu=mu) + * weights**-1 + ) + ** -0.5 + ) def _mask(self, weights): """ @@ -613,11 +654,12 @@ def _mask(self, weights): """ mask = (np.abs(weights) >= np.sqrt(EPS)) * np.isfinite(weights) if mask.sum() == 0: - raise OptimizationError('PIRLS optimization has diverged.\n' + - 'Try increasing regularization, or specifying an initial value for self.coef_') + raise OptimizationError( + 'PIRLS optimization has diverged.\n' + + 'Try increasing regularization, or specifying an initial value for self.coef_' # noqa: E501 + ) return mask - def _initial_estimate(self, y, modelmat): """ Makes an inital estimate for the model coefficients. 
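Aside: a tiny numeric sketch, not part of the patch, of the PIRLS weights that `_W` above computes, W = diag((g'(mu)^2 * V(mu) / w)^(-1/2)). Assuming a logit link with the binomial variance function (levels=1, per `LogitLink.gradient` and `BinomialDist.V` earlier in this diff), the expression collapses to sqrt(w * mu * (1 - mu)):

    import numpy as np

    mu = np.array([0.2, 0.5, 0.9])  # current mean estimates
    w = np.array([1.0, 2.0, 1.0])   # prior sample weights

    grad = 1.0 / (mu * (1.0 - mu))  # d(logit)/d(mu), i.e. the link gradient
    V = mu * (1.0 - mu)             # binomial variance function with levels=1
    W_diag = (grad**2 * V / w) ** -0.5

    print(np.allclose(W_diag, np.sqrt(w * mu * (1.0 - mu))))  # True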
@@ -652,16 +694,19 @@ def _initial_estimate(self, y, modelmat):

         # transform the problem to the linear scale
         y = deepcopy(y).astype('float64')
-        y[y == 0] += .01 # edge case for log link, inverse link, and logit link
-        y[y == 1] -= .01 # edge case for logit link
+        y[y == 0] += 0.01  # edge case for log link, inverse link, and logit link
+        y[y == 1] -= 0.01  # edge case for logit link
         y_ = self.link.link(y, self.distribution)
         y_ = make_2d(y_, verbose=False)

-        assert np.isfinite(y_).all(), "transformed response values should be well-behaved."
+        assert np.isfinite(
+            y_
+        ).all(), "transformed response values should be well-behaved."

         # solve the linear problem
-        return np.linalg.solve(load_diagonal(modelmat.T.dot(modelmat).A),
-                               modelmat.T.dot(y_))
+        return np.linalg.solve(
+            load_diagonal(modelmat.T.dot(modelmat).A), modelmat.T.dot(y_)
+        )

         # not sure if this is faster...
         # return np.linalg.pinv(modelmat.T.dot(modelmat)).dot(modelmat.T.dot(y_))
@@ -683,32 +728,34 @@ def _pirls(self, X, Y, weights):
        -------
        None
        """
-        modelmat = self._modelmat(X) # build a basis matrix for the GLM
+        modelmat = self._modelmat(X)  # build a basis matrix for the GLM
         n, m = modelmat.shape

         # initialize GLM coefficients if model is not yet fitted
-        if (not self._is_fitted
+        if (
+            not self._is_fitted
             or len(self.coef_) != self.terms.n_coefs
-            or not np.isfinite(self.coef_).all()):
-
-            # initialize the model
-            self.coef_ = self._initial_estimate(Y, modelmat)
+            or not np.isfinite(self.coef_).all()
+        ):
+            # initialize the model
+            self.coef_ = self._initial_estimate(Y, modelmat)

-        assert np.isfinite(self.coef_).all(), "coefficients should be well-behaved, but found: {}".format(self.coef_)
+        assert np.isfinite(
+            self.coef_
+        ).all(), "coefficients should be well-behaved, but found: {}".format(self.coef_)

         P = self._P()
-        S = sp.sparse.diags(np.ones(m) * np.sqrt(EPS)) # improve condition
+        S = sp.sparse.diags(np.ones(m) * np.sqrt(EPS))  # improve condition
        # S += self._H # add any user-chosen minimum penalty to the diagonal

        # if we don't have any constraints, then do cholesky now
        if not self.terms.hasconstraint:
            E = self._cholesky(S + P, sparse=False, verbose=self.verbose)

-        min_n_m = np.min([m,n])
+        min_n_m = np.min([m, n])
         Dinv = np.zeros((min_n_m + m, m)).T

         for _ in range(self.max_iter):
-
             # recompute cholesky if needed
             if self.terms.hasconstraint:
                 P = self._P()
@@ -716,17 +763,17 @@ def _pirls(self, X, Y, weights):
                 E = self._cholesky(S + P + C, sparse=False, verbose=self.verbose)

             # forward pass
-            y = deepcopy(Y) # for simplicity
+            y = deepcopy(Y)  # for simplicity
             lp = self._linear_predictor(modelmat=modelmat)
             mu = self.link.mu(lp, self.distribution)
-            W = self._W(mu, weights, y) # create pirls weight matrix
+            W = self._W(mu, weights, y)  # create pirls weight matrix

             # check for weights == 0, nan, and update
             mask = self._mask(W.diagonal())
-            y = y[mask] # update
-            lp = lp[mask] # update
-            mu = mu[mask] # update
-            W = sp.sparse.diags(W.diagonal()[mask]) # update
+            y = y[mask]  # update
+            lp = lp[mask]  # update
+            mu = mu[mask]  # update
+            W = sp.sparse.diags(W.diagonal()[mask])  # update

             # PIRLS Wood pg 183
             pseudo_data = W.dot(self._pseudo_data(y, lp, mu))
@@ -734,12 +781,13 @@ def _pirls(self, X, Y, weights):
             # log on-loop-start stats
             self._on_loop_start(vars())

-            WB = W.dot(modelmat[mask,:]) # common matrix product
+            WB = W.dot(modelmat[mask, :])  # common matrix product
             Q, R = np.linalg.qr(WB.A)

             if not np.isfinite(Q).all() or not np.isfinite(R).all():
-                raise ValueError('QR decomposition produced NaN or Inf. 
'\ - 'Check X data.') + raise ValueError( + 'QR decomposition produced NaN or Inf. ' 'Check X data.' + ) # need to recompute the number of singular values min_n_m = np.min([m, n, mask.sum()]) @@ -747,16 +795,18 @@ def _pirls(self, X, Y, weights): # SVD U, d, Vt = np.linalg.svd(np.vstack([R, E])) - svd_mask = d <= (d.max() * np.sqrt(EPS)) # mask out small singular values - np.fill_diagonal(Dinv, d**-1) # invert the singular values - U1 = U[:min_n_m,:min_n_m] # keep only top corner of U + # mask out small singular values + # svd_mask = d <= (d.max() * np.sqrt(EPS)) + + np.fill_diagonal(Dinv, d**-1) # invert the singular values + U1 = U[:min_n_m, :min_n_m] # keep only top corner of U # update coefficients B = Vt.T.dot(Dinv).dot(U1.T).dot(Q.T) coef_new = B.dot(pseudo_data).flatten() - diff = np.linalg.norm(self.coef_ - coef_new)/np.linalg.norm(coef_new) - self.coef_ = coef_new # update + diff = np.linalg.norm(self.coef_ - coef_new) / np.linalg.norm(coef_new) + self.coef_ = coef_new # update # log on-loop-end stats self._on_loop_end(vars()) @@ -766,71 +816,15 @@ def _pirls(self, X, Y, weights): break # estimate statistics even if not converged - self._estimate_model_statistics(Y, modelmat, inner=None, BW=WB.T, B=B, - weights=weights, U1=U1) + self._estimate_model_statistics( + Y, modelmat, inner=None, BW=WB.T, B=B, weights=weights, U1=U1 + ) if diff < self.tol: return print('did not converge') return - # def _pirls_naive(self, X, y): - # """ - # Performs naive PIRLS iterations to estimate GAM coefficients - # - # Parameters - # --------- - # X : array-like of shape (n_samples, m_features) - # containing input data - # y : array-like of shape (n,) - # containing target data - # - # Returns - # ------- - # None - # """ - # modelmat = self._modelmat(X) # build a basis matrix for the GLM - # m = modelmat.shape[1] - # - # # initialize GLM coefficients - # if not self._is_fitted or len(self.coef_) != sum(self._n_coeffs): - # self.coef_ = np.ones(m) * np.sqrt(EPS) # allow more training - # - # P = self._P() # create penalty matrix - # P += sp.sparse.diags(np.ones(m) * np.sqrt(EPS)) # improve condition - # - # for _ in range(self.max_iter): - # lp = self._linear_predictor(modelmat=modelmat) - # mu = self.link.mu(lp, self.distribution) - # - # mask = self._mask(mu) - # mu = mu[mask] # update - # lp = lp[mask] # update - # - # if self.family == 'binomial': - # self.acc.append(self.accuracy(y=y[mask], mu=mu)) # log the training accuracy - # self.dev.append(self.deviance_(y=y[mask], mu=mu, scaled=False)) # log the training deviance - # - # weights = self._W(mu)**2 # PIRLS, added square for modularity - # pseudo_data = self._pseudo_data(y, lp, mu) # PIRLS - # - # BW = modelmat.T.dot(weights).tocsc() # common matrix product - # inner = sp.sparse.linalg.inv(BW.dot(modelmat) + P) # keep for edof - # - # coef_new = inner.dot(BW).dot(pseudo_data).flatten() - # diff = np.linalg.norm(self.coef_ - coef_new)/np.linalg.norm(coef_new) - # self.diffs.append(diff) - # self.coef_ = coef_new # update - # - # # check convergence - # if diff < self.tol: - # self.edof_ = self._estimate_edof(modelmat, inner, BW) - # self.aic_ = self._estimate_AIC(X, y, mu) - # self.aicc_ = self._estimate_AICc(X, y, mu) - # return - # - # print('did not converge') - def _on_loop_start(self, variables): """ performs on-loop-start actions like callbacks @@ -898,8 +892,9 @@ def fit(self, X, y, weights=None): if weights is not None: weights = np.array(weights).astype('f').ravel() - weights = check_array(weights, name='sample weights', - ndim=1, 
verbose=self.verbose) + weights = check_array( + weights, name='sample weights', ndim=1, verbose=self.verbose + ) check_lengths(y, weights) else: weights = np.ones_like(y).astype('float64') @@ -925,8 +920,8 @@ def fit(self, X, y, weights=None): return self def score(self, X, y, weights=None): - """ - method to compute the explained deviance for a trained model for a given X data and y labels + """compute the explained deviance for a trained model for a given X data and + y labels Parameters ---------- @@ -974,27 +969,35 @@ def deviance_residuals(self, X, y, weights=None, scaled=False): raise AttributeError('GAM has not been fitted. Call fit first.') y = check_y(y, self.link, self.distribution, verbose=self.verbose) - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) check_X_y(X, y) if weights is not None: weights = np.array(weights).astype('f').ravel() - weights = check_array(weights, name='sample weights', - ndim=1, verbose=self.verbose) + weights = check_array( + weights, name='sample weights', ndim=1, verbose=self.verbose + ) check_lengths(y, weights) else: weights = np.ones_like(y).astype('float64') mu = self.predict_mu(X) - sign = np.sign(y-mu) - return sign * self.distribution.deviance(y, mu, - weights=weights, - scaled=scaled) ** 0.5 + sign = np.sign(y - mu) + return ( + sign + * self.distribution.deviance(y, mu, weights=weights, scaled=scaled) ** 0.5 + ) - def _estimate_model_statistics(self, y, modelmat, inner=None, BW=None, - B=None, weights=None, U1=None): + def _estimate_model_statistics( + self, y, modelmat, inner=None, BW=None, B=None, weights=None, U1=None + ): """ method to compute all of the model statistics @@ -1035,16 +1038,24 @@ def _estimate_model_statistics(self, y, modelmat, inner=None, BW=None, self.statistics_['edof_per_coef'] = np.diagonal(U1.dot(U1.T)) self.statistics_['edof'] = self.statistics_['edof_per_coef'].sum() if not self.distribution._known_scale: - self.distribution.scale = self.distribution.phi(y=y, mu=mu, edof=self.statistics_['edof'], weights=weights) + self.distribution.scale = self.distribution.phi( + y=y, mu=mu, edof=self.statistics_['edof'], weights=weights + ) self.statistics_['scale'] = self.distribution.scale - self.statistics_['cov'] = (B.dot(B.T)) * self.distribution.scale # parameter covariances. no need to remove a W because we are using W^2. Wood pg 184 - self.statistics_['se'] = self.statistics_['cov'].diagonal()**0.5 + self.statistics_['cov'] = ( + B.dot(B.T) + ) * self.distribution.scale # parameter covariances. no need to remove a W because we are using W^2. 
Wood pg 184 # noqa: E501 + self.statistics_['se'] = self.statistics_['cov'].diagonal() ** 0.5 self.statistics_['AIC'] = self._estimate_AIC(y=y, mu=mu, weights=weights) self.statistics_['AICc'] = self._estimate_AICc(y=y, mu=mu, weights=weights) self.statistics_['pseudo_r2'] = self._estimate_r2(y=y, mu=mu, weights=weights) - self.statistics_['GCV'], self.statistics_['UBRE'] = self._estimate_GCV_UBRE(modelmat=modelmat, y=y, weights=weights) + self.statistics_['GCV'], self.statistics_['UBRE'] = self._estimate_GCV_UBRE( + modelmat=modelmat, y=y, weights=weights + ) self.statistics_['loglikelihood'] = self._loglikelihood(y, mu, weights=weights) - self.statistics_['deviance'] = self.distribution.deviance(y=y, mu=mu, weights=weights).sum() + self.statistics_['deviance'] = self.distribution.deviance( + y=y, mu=mu, weights=weights + ).sum() self.statistics_['p_values'] = self._estimate_p_values() def _estimate_AIC(self, y, mu, weights=None): @@ -1065,9 +1076,14 @@ def _estimate_AIC(self, y, mu, weights=None): ------- None """ - estimated_scale = not(self.distribution._known_scale) # if we estimate the scale, that adds 2 dof - return -2*self._loglikelihood(y=y, mu=mu, weights=weights) + \ - 2*self.statistics_['edof'] + 2*estimated_scale + estimated_scale = not ( + self.distribution._known_scale + ) # if we estimate the scale, that adds 2 dof + return ( + -2 * self._loglikelihood(y=y, mu=mu, weights=weights) + + 2 * self.statistics_['edof'] + + 2 * estimated_scale + ) def _estimate_AICc(self, y, mu, weights=None): """ @@ -1093,7 +1109,9 @@ def _estimate_AICc(self, y, mu, weights=None): edof = self.statistics_['edof'] if self.statistics_['AIC'] is None: self.statistics_['AIC'] = self._estimate_AIC(y, mu, weights) - return self.statistics_['AIC'] + 2*(edof + 1)*(edof + 2)/(y.shape[0] - edof -2) + return self.statistics_['AIC'] + 2 * (edof + 1) * (edof + 2) / ( + y.shape[0] - edof - 2 + ) def _estimate_r2(self, X=None, y=None, mu=None, weights=None): """ @@ -1131,14 +1149,15 @@ def _estimate_r2(self, X=None, y=None, mu=None, weights=None): full_ll = self._loglikelihood(y=y, mu=mu, weights=weights) r2 = OrderedDict() - r2['explained_deviance'] = 1. - full_d.sum()/null_d.sum() - r2['McFadden'] = full_ll/null_ll - r2['McFadden_adj'] = 1. - (full_ll - self.statistics_['edof'])/null_ll + r2['explained_deviance'] = 1.0 - full_d.sum() / null_d.sum() + r2['McFadden'] = full_ll / null_ll + r2['McFadden_adj'] = 1.0 - (full_ll - self.statistics_['edof']) / null_ll return r2 - def _estimate_GCV_UBRE(self, X=None, y=None, modelmat=None, gamma=1.4, - add_scale=True, weights=None): + def _estimate_GCV_UBRE( + self, X=None, y=None, modelmat=None, gamma=1.4, add_scale=True, weights=None + ): """ Generalized Cross Validation and Un-Biased Risk Estimator. @@ -1176,8 +1195,10 @@ def _estimate_GCV_UBRE(self, X=None, y=None, modelmat=None, gamma=1.4, see Wood 2006 pg. 177-182, 220 for more details. 
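For orientation, the two criteria as minimal standalone functions (the GCV form mirrors the code below; the UBRE form is the textbook version from Wood 2006 — `dev` is the unscaled deviance, `edof` the effective degrees of freedom, and the function names are illustrative, not pyGAM API):

    def gcv(dev, n, edof, gamma=1.4):
        # GCV = n * D / (n - gamma * edof)^2, used when the scale is estimated
        return (n * dev) / (n - gamma * edof) ** 2

    def ubre(dev, n, edof, scale, gamma=1.4):
        # textbook UBRE ~ D/n + 2 * gamma * edof * scale / n - scale (scale known);
        # pyGAM can optionally add the scale back to keep the score positive
        return dev / n + 2.0 * gamma / n * edof * scale - scale

Raising `gamma` above 1 makes each effective degree of freedom count more, which steers the search toward smoother models.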
""" if gamma < 1: - raise ValueError('gamma scaling should be greater than 1, '\ - 'but found gamma = {}',format(gamma)) + raise ValueError( + 'gamma scaling should be greater than 1, ' 'but found gamma = {}', + format(gamma), + ) if modelmat is None: modelmat = self._modelmat(X) @@ -1193,20 +1214,23 @@ def _estimate_GCV_UBRE(self, X=None, y=None, modelmat=None, gamma=1.4, GCV = None UBRE = None - dev = self.distribution.deviance(mu=mu, y=y, scaled=False, weights=weights).sum() + dev = self.distribution.deviance( + mu=mu, y=y, scaled=False, weights=weights + ).sum() if self.distribution._known_scale: # scale is known, use UBRE scale = self.distribution.scale - UBRE = 1./n * dev - (~add_scale)*(scale) + 2.*gamma/n * edof * scale + UBRE = ( + 1.0 / n * dev - (~add_scale) * (scale) + 2.0 * gamma / n * edof * scale + ) else: # scale unkown, use GCV - GCV = (n * dev) / (n - gamma * edof)**2 + GCV = (n * dev) / (n - gamma * edof) ** 2 return (GCV, UBRE) def _estimate_p_values(self): - """estimate the p-values for all features - """ + """estimate the p-values for all features""" if not self._is_fitted: raise AttributeError('GAM has not been fitted. Call fit first.') @@ -1231,16 +1255,16 @@ def _compute_p_value(self, term_i): Notes ----- Wood 2006, section 4.8.5: - The p-values, calculated in this manner, behave correctly for un-penalized models, - or models with known smoothing parameters, but when smoothing parameters have - been estimated, the p-values are typically lower than they should be, meaning that - the tests reject the null too readily. + The p-values, calculated in this manner, behave correctly for un-penalized + models, or models with known smoothing parameters, but when smoothing + parameters have been estimated, the p-values are typically lower than they + should be, meaning that the tests reject the null too readily. (...) - In practical terms, if these p-values suggest that a term is not needed in a model, - then this is probably true, but if a term is deemed ‘significant’ it is important to be - aware that this significance may be overstated. + In practical terms, if these p-values suggest that a term is not needed in + a model, then this is probably true, but if a term is deemed ‘significant’ + it is important to be aware that this significance may be overstated. based on equations from Wood 2006 section 4.8.5 page 191 and errata https://people.maths.bris.ac.uk/~sw15190/igam/iGAMerrata-12.pdf @@ -1268,9 +1292,11 @@ def _compute_p_value(self, term_i): else: # if scale has been estimated, prefer to use f-statisitc score = score / rank - return 1 - sp.stats.f.cdf(score, rank, self.statistics_['n_samples'] - self.statistics_['edof']) + return 1 - sp.stats.f.cdf( + score, rank, self.statistics_['n_samples'] - self.statistics_['edof'] + ) - def confidence_intervals(self, X, width=.95, quantiles=None): + def confidence_intervals(self, X, width=0.95, quantiles=None): """estimate confidence intervals for the model. Parameters @@ -1290,21 +1316,35 @@ def confidence_intervals(self, X, width=.95, quantiles=None): Notes ----- Wood 2006, section 4.9 - Confidence intervals based on section 4.8 rely on large sample results to deal with - non-Gaussian distributions, and treat the smoothing parameters as fixed, when in - reality they are estimated from the data. + Confidence intervals based on section 4.8 rely on large sample results to + deal with non-Gaussian distributions, and treat the smoothing parameters as + fixed, when in reality they are estimated from the data. 
""" if not self._is_fitted: raise AttributeError('GAM has not been fitted. Call fit first.') - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) return self._get_quantiles(X, width, quantiles, prediction=False) - def _get_quantiles(self, X, width, quantiles, modelmat=None, lp=None, - prediction=False, xform=True, term=-1): + def _get_quantiles( + self, + X, + width, + quantiles, + modelmat=None, + lp=None, + prediction=False, + xform=True, + term=-1, + ): """ estimate prediction intervals for LinearGAM @@ -1347,12 +1387,13 @@ def _get_quantiles(self, X, width, quantiles, modelmat=None, lp=None, if quantiles is not None: quantiles = np.atleast_1d(quantiles) else: - alpha = (1 - width)/2. + alpha = (1 - width) / 2.0 quantiles = [alpha, 1 - alpha] for quantile in quantiles: if (quantile >= 1) or (quantile <= 0): - raise ValueError('quantiles must be in (0, 1), but found {}'\ - .format(quantiles)) + raise ValueError( + 'quantiles must be in (0, 1), but found {}'.format(quantiles) + ) if modelmat is None: modelmat = self._modelmat(X, term=term) @@ -1371,8 +1412,10 @@ def _get_quantiles(self, X, width, quantiles, modelmat=None, lp=None, if self.distribution._known_scale: q = sp.stats.norm.ppf(quantile) else: - q = sp.stats.t.ppf(quantile, df=self.statistics_['n_samples'] - - self.statistics_['edof']) + q = sp.stats.t.ppf( + quantile, + df=self.statistics_['n_samples'] - self.statistics_['edof'], + ) lines.append(lp + q * var**0.5) lines = np.vstack(lines).T @@ -1447,9 +1490,9 @@ def generate_X_grid(self, term, n=100, meshgrid=False): if self.terms[term].istensor: Xs = [] for term_ in self.terms[term]: - Xs.append(np.linspace(term_.edge_knots_[0], - term_.edge_knots_[1], - num=n)) + Xs.append( + np.linspace(term_.edge_knots_[0], term_.edge_knots_[1], num=n) + ) Xs = np.meshgrid(*Xs, indexing='ij') if meshgrid: @@ -1459,9 +1502,9 @@ def generate_X_grid(self, term, n=100, meshgrid=False): # all other Terms elif hasattr(self.terms[term], 'edge_knots_'): - x = np.linspace(self.terms[term].edge_knots_[0], - self.terms[term].edge_knots_[1], - num=n) + x = np.linspace( + self.terms[term].edge_knots_[0], self.terms[term].edge_knots_[1], num=n + ) if meshgrid: return (x,) @@ -1470,7 +1513,7 @@ def generate_X_grid(self, term, n=100, meshgrid=False): X = np.zeros((n, self.statistics_['m_features'])) X[:, self.terms[term].feature] = x if getattr(self.terms[term], 'by', None) is not None: - X[:, self.terms[term].by] = 1. + X[:, self.terms[term].by] = 1.0 return X @@ -1478,8 +1521,9 @@ def generate_X_grid(self, term, n=100, meshgrid=False): else: raise TypeError('Unexpected term type: {}'.format(self.terms[term])) - def partial_dependence(self, term, X=None, width=None, quantiles=None, - meshgrid=False): + def partial_dependence( + self, term, X=None, width=None, quantiles=None, meshgrid=False + ): """ Computes the term functions for the GAM and possibly their confidence intervals. 
@@ -1543,8 +1587,11 @@ def partial_dependence(self, term, X=None, width=None, quantiles=None, # ensure term exists if (term >= len(self.terms)) or (term < -1): - raise ValueError('Term {} out of range for model with {} terms'\ - .format(term, len(self.terms))) + raise ValueError( + 'Term {} out of range for model with {} terms'.format( + term, len(self.terms) + ) + ) # cant do Intercept if self.terms[term].isintercept: @@ -1555,14 +1602,21 @@ def partial_dependence(self, term, X=None, width=None, quantiles=None, if meshgrid: if not isinstance(X, tuple): - raise ValueError('X must be a tuple of grids if `meshgrid=True`, '\ - 'but found X: {}'.format(X)) + raise ValueError( + 'X must be a tuple of grids if `meshgrid=True`, ' + 'but found X: {}'.format(X) + ) shape = X[0].shape X = self._flatten_mesh(X, term=term) - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) modelmat = self._modelmat(X, term=term) pdep = self._linear_predictor(modelmat=modelmat, term=term) @@ -1570,12 +1624,15 @@ def partial_dependence(self, term, X=None, width=None, quantiles=None, compute_quantiles = (width is not None) or (quantiles is not None) if compute_quantiles: - conf_intervals = self._get_quantiles(X, width=width, - quantiles=quantiles, - modelmat=modelmat, - lp=pdep, - term=term, - xform=False) + conf_intervals = self._get_quantiles( + X, + width=width, + quantiles=quantiles, + modelmat=modelmat, + lp=pdep, + term=term, + xform=False, + ) out += [conf_intervals] @@ -1612,29 +1669,98 @@ def summary(self): model_fmt = [ (self.__class__.__name__, 'model_details', width_details), - ('', 'model_results', width_results) - ] + ('', 'model_results', width_results), + ] model_details = [] objective = 'UBRE' if self.distribution._known_scale else 'GCV' - model_details.append({'model_details': space_row('Distribution:', self.distribution.__class__.__name__, total_width=width_details), - 'model_results': space_row('Effective DoF:', str(np.round(self.statistics_['edof'], 4)), total_width=width_results)}) - model_details.append({'model_details': space_row('Link Function:', self.link.__class__.__name__, total_width=width_details), - 'model_results': space_row('Log Likelihood:', str(np.round(self.statistics_['loglikelihood'], 4)), total_width=width_results)}) - model_details.append({'model_details': space_row('Number of Samples:', str(self.statistics_['n_samples']), total_width=width_details), - 'model_results': space_row('AIC: ', str(np.round(self.statistics_['AIC'], 4)), total_width=width_results)}) - model_details.append({'model_results': space_row('AICc: ', str(np.round(self.statistics_['AICc'], 4)), total_width=width_results)}) - model_details.append({'model_results': space_row(objective + ':', str(np.round(self.statistics_[objective], 4)), total_width=width_results)}) - model_details.append({'model_results': space_row('Scale:', str(np.round(self.statistics_['scale'], 4)), total_width=width_results)}) - model_details.append({'model_results': space_row('Pseudo R-Squared:', str(np.round(self.statistics_['pseudo_r2']['explained_deviance'], 4)), total_width=width_results)}) + model_details.append( + { + 'model_details': space_row( + 'Distribution:', + self.distribution.__class__.__name__, + total_width=width_details, + ), + 'model_results': space_row( + 
'Effective DoF:', + str(np.round(self.statistics_['edof'], 4)), + total_width=width_results, + ), + } + ) + model_details.append( + { + 'model_details': space_row( + 'Link Function:', + self.link.__class__.__name__, + total_width=width_details, + ), + 'model_results': space_row( + 'Log Likelihood:', + str(np.round(self.statistics_['loglikelihood'], 4)), + total_width=width_results, + ), + } + ) + model_details.append( + { + 'model_details': space_row( + 'Number of Samples:', + str(self.statistics_['n_samples']), + total_width=width_details, + ), + 'model_results': space_row( + 'AIC: ', + str(np.round(self.statistics_['AIC'], 4)), + total_width=width_results, + ), + } + ) + model_details.append( + { + 'model_results': space_row( + 'AICc: ', + str(np.round(self.statistics_['AICc'], 4)), + total_width=width_results, + ) + } + ) + model_details.append( + { + 'model_results': space_row( + objective + ':', + str(np.round(self.statistics_[objective], 4)), + total_width=width_results, + ) + } + ) + model_details.append( + { + 'model_results': space_row( + 'Scale:', + str(np.round(self.statistics_['scale'], 4)), + total_width=width_results, + ) + } + ) + model_details.append( + { + 'model_results': space_row( + 'Pseudo R-Squared:', + str( + np.round(self.statistics_['pseudo_r2']['explained_deviance'], 4) + ), + total_width=width_results, + ) + } + ) # term summary data = [] for i, term in enumerate(self.terms): - # TODO bug: if the number of samples is less than the number of coefficients # we cant get the edof per term if len(self.statistics_['edof_per_coef']) == len(self.coef_): @@ -1644,13 +1770,13 @@ def summary(self): edof = '' term_data = { - 'feature_func': repr(term), - 'lam': '' if term.isintercept else np.round(flatten(term.lam), 4), - 'rank': '{}'.format(term.n_coefs), - 'edof': '{}'.format(edof), - 'p_value': '%.2e'%(self.statistics_['p_values'][i]), - 'sig_code': sig_code(self.statistics_['p_values'][i]) - } + 'feature_func': repr(term), + 'lam': '' if term.isintercept else np.round(flatten(term.lam), 4), + 'rank': '{}'.format(term.n_coefs), + 'edof': '{}'.format(edof), + 'p_value': '%.2e' % (self.statistics_['p_values'][i]), + 'sig_code': sig_code(self.statistics_['p_values'][i]), + } data.append(term_data) @@ -1660,33 +1786,47 @@ def summary(self): ('Rank', 'rank', 12), ('EDoF', 'edof', 12), ('P > x', 'p_value', 12), - ('Sig. Code', 'sig_code', 12) - ] + ('Sig. Code', 'sig_code', 12), + ] - print( TablePrinter(model_fmt, ul='=', sep=' ')(model_details) ) - print("="*106) - print( TablePrinter(fmt, ul='=')(data) ) - print("="*106) + print(TablePrinter(model_fmt, ul='=', sep=' ')(model_details)) + print("=" * 106) + print(TablePrinter(fmt, ul='=')(data)) + print("=" * 106) print("Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1") print() - print("WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem\n" \ - " which can cause p-values to appear significant when they are not.") + print( + "WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem\n" # noqa: E501 + " which can cause p-values to appear significant when they are not." 
+    )
    print()
-    print("WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with\n" \
-          "         known smoothing parameters, but when smoothing parameters have been estimated, the p-values\n" \
-          "         are typically lower than they should be, meaning that the tests reject the null too readily.")
+    print(
+        "WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with\n"  # noqa: E501
+        "         known smoothing parameters, but when smoothing parameters have been estimated, the p-values\n"  # noqa: E501
+        "         are typically lower than they should be, meaning that the tests reject the null too readily."  # noqa: E501
+    )

    # P-VALUE BUG
-    warnings.warn("KNOWN BUG: p-values computed in this summary are likely "\
-                  "much smaller than they should be. \n \n"\
-                  "Please do not make inferences based on these values! \n\n"\
-                  "Collaborate on a solution, and stay up to date at: \n"\
-                  "github.com/dswah/pyGAM/issues/163 \n", stacklevel=2)
-
-
-    def gridsearch(self, X, y, weights=None, return_scores=False,
-                   keep_best=True, objective='auto', progress=True,
-                   **param_grids):
+    warnings.warn(
+        "KNOWN BUG: p-values computed in this summary are likely "
+        "much smaller than they should be. \n \n"
+        "Please do not make inferences based on these values! \n\n"
+        "Collaborate on a solution, and stay up to date at: \n"
+        "github.com/dswah/pyGAM/issues/163 \n",
+        stacklevel=2,
+    )
+
+    def gridsearch(
+        self,
+        X,
+        y,
+        weights=None,
+        return_scores=False,
+        keep_best=True,
+        objective='auto',
+        progress=True,
+        **param_grids,
+    ):
        """
        Performs a grid search over a space of parameters for a given objective
@@ -1801,30 +1941,33 @@ def gridsearch(self, X, y, weights=None, return_scores=False,
        if weights is not None:
            weights = np.array(weights).astype('f').ravel()
-            weights = check_array(weights, name='sample weights',
-                                  ndim=1, verbose=self.verbose)
+            weights = check_array(
+                weights, name='sample weights', ndim=1, verbose=self.verbose
+            )
            check_lengths(y, weights)
        else:
            weights = np.ones_like(y).astype('float64')

        # validate objective
        if objective not in ['auto', 'GCV', 'UBRE', 'AIC', 'AICc']:
-            raise ValueError("objective mut be in "\
-                             "['auto', 'GCV', 'UBRE', 'AIC', 'AICc'], '\
-                             'but found objective = {}".format(objective))
+            raise ValueError(
+                "objective must be in "
+                "['auto', 'GCV', 'UBRE', 'AIC', 'AICc'], "
+                "but found objective = {}".format(objective)
+            )

        # check objective
        if self.distribution._known_scale:
            if objective == 'GCV':
-                raise ValueError('GCV should be used for models with'\
-                                 'unknown scale')
+                raise ValueError('GCV should be used for models with ' 'unknown scale')
            if objective == 'auto':
                objective = 'UBRE'
        else:
            if objective == 'UBRE':
-                raise ValueError('UBRE should be used for models with '\
-                                 'known scale')
+                raise ValueError('UBRE should be used for models with ' 'known scale')
            if objective == 'auto':
                objective = 'GCV'
@@ -1837,32 +1980,36 @@ def gridsearch(self, X, y, weights=None, return_scores=False,
        params = []
        grids = []
        for param, grid in list(param_grids.items()):
-
            # check param exists
            if param not in (admissible_params):
                raise ValueError('unknown parameter: {}'.format(param))

            # check grid is iterable at all
-            if not (isiterable(grid) and (len(grid) > 1)): \
-                raise ValueError('{} grid must either be iterable of '
-                                 'iterables, or an iterable of lengnth > 1, '\
-                                 'but found {}'.format(param, grid))
+            if not (isiterable(grid) and (len(grid) > 1)):
+                raise ValueError(
+                    '{} grid must either be iterable of '
+                    'iterables, or an iterable of length > 1, '
+                    'but found {}'.format(param, grid)
+                )

            # prepare grid
            if any(isiterable(g) for g in grid):
-
                # get required parameter shape
                target_len = len(flatten(getattr(self, param)))

                # check if cartesian product needed
-                cartesian = (not isinstance(grid, np.ndarray) or grid.ndim != 2)
+                cartesian = not isinstance(grid, np.ndarray) or grid.ndim != 2

                # build grid
                grid = [np.atleast_1d(g) for g in grid]

                # check shape
-                msg = '{} grid should have {} columns, '\
-                      'but found grid with {} columns'.format(param, target_len, len(grid))
+                msg = (
+                    '{} grid should have {} columns, '
+                    'but found grid with {} columns'.format(
+                        param, target_len, len(grid)
+                    )
+                )
                if cartesian:
                    if len(grid) != target_len:
                        raise ValueError(msg)
@@ -1878,10 +2025,10 @@ def gridsearch(self, X, y, weights=None, return_scores=False,
        # build a list of dicts of candidate model params
        param_grid_list = []
        for candidate in combine(*grids):
-            param_grid_list.append(dict(zip(params,candidate)))
+            param_grid_list.append(dict(zip(params, candidate)))

        # set up data collection
-        best_model = None # keep the best model
+        best_model = None  # keep the best model
        best_score = np.inf
        scores = []
        models = []
@@ -1899,7 +2046,9 @@ def gridsearch(self, X, y, weights=None, return_scores=False,
        if progress:
            pbar = ProgressBar()
        else:
-            pbar = lambda x: x
+
+            def pbar(x):
+                return x

        # loop through candidate model params
        for param_grid in pbar(param_grid_list):
@@ -1941,16 +2090,23 @@ def gridsearch(self, X, y, weights=None, return_scores=False,
        # copy over the best
        if keep_best:
-            self.set_params(deep=True,
-                            force=True,
-                            **best_model.get_params(deep=True))
+            self.set_params(deep=True, force=True, **best_model.get_params(deep=True))

        if return_scores:
            return OrderedDict(zip(models, scores))
        else:
            return self

-    def sample(self, X, y, quantity='y', sample_at_X=None,
-               weights=None, n_draws=100, n_bootstraps=5, objective='auto'):
+    def sample(
+        self,
+        X,
+        y,
+        quantity='y',
+        sample_at_X=None,
+        weights=None,
+        n_draws=100,
+        n_bootstraps=5,
+        objective='auto',
+    ):
        """Simulate from the posterior of the coefficients and smoothing params.

        Samples are drawn from the posterior of the coefficients and smoothing
@@ -2045,12 +2201,19 @@ def sample(self, X, y, quantity='y', sample_at_X=None,
        R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
        """
        if quantity not in {'mu', 'coef', 'y'}:
-            raise ValueError("`quantity` must be one of 'mu', 'coef', 'y';"
-                             " got {}".format(quantity))
+            raise ValueError(
+                "`quantity` must be one of 'mu', 'coef', 'y';"
+                " got {}".format(quantity)
+            )

        coef_draws = self._sample_coef(
-            X, y, weights=weights, n_draws=n_draws,
-            n_bootstraps=n_bootstraps, objective=objective)
+            X,
+            y,
+            weights=weights,
+            n_draws=n_draws,
+            n_bootstraps=n_bootstraps,
+            objective=objective,
+        )

        if quantity == 'coef':
            return coef_draws
@@ -2060,14 +2223,16 @@ def sample(self, X, y, quantity='y', sample_at_X=None,
        linear_predictor = self._modelmat(sample_at_X).dot(coef_draws.T)
        mu_shape_n_draws_by_n_samples = self.link.mu(
-            linear_predictor, self.distribution).T
+            linear_predictor, self.distribution
+        ).T
        if quantity == 'mu':
            return mu_shape_n_draws_by_n_samples
        else:
            return self.distribution.sample(mu_shape_n_draws_by_n_samples)

-    def _sample_coef(self, X, y, weights=None, n_draws=100, n_bootstraps=1,
-                     objective='auto'):
+    def _sample_coef(
+        self, X, y, weights=None, n_draws=100, n_bootstraps=1, objective='auto'
+    ):
        """Simulate from the posterior of the coefficients.
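Illustrative call of the public `sample` API above (placeholder `X`, `y`; each extra bootstrap re-runs a smoothing gridsearch, so `n_bootstraps` is kept small here):

    from pygam import LinearGAM

    gam = LinearGAM().fit(X, y)  # X, y assumed available
    mu_draws = gam.sample(X, y, quantity='mu', n_draws=100, n_bootstraps=1)
    # mu_draws has shape (n_draws, n_samples)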
NOTE: A `gridsearch` is done `n_bootstraps` many times, so keep @@ -2117,23 +2282,24 @@ def _sample_coef(self, X, y, weights=None, n_draws=100, n_bootstraps=1, if not self._is_fitted: raise AttributeError('GAM has not been fitted. Call fit first.') if n_bootstraps < 1: - raise ValueError('n_bootstraps must be >= 1;' - ' got {}'.format(n_bootstraps)) + raise ValueError( + 'n_bootstraps must be >= 1;' ' got {}'.format(n_bootstraps) + ) if n_draws < 1: - raise ValueError('n_draws must be >= 1;' - ' got {}'.format(n_draws)) + raise ValueError('n_draws must be >= 1;' ' got {}'.format(n_draws)) - coef_bootstraps, cov_bootstraps = ( - self._bootstrap_samples_of_smoothing(X, y, weights=weights, - n_bootstraps=n_bootstraps, - objective=objective)) + coef_bootstraps, cov_bootstraps = self._bootstrap_samples_of_smoothing( + X, y, weights=weights, n_bootstraps=n_bootstraps, objective=objective + ) coef_draws = self._simulate_coef_from_bootstraps( - n_draws, coef_bootstraps, cov_bootstraps) + n_draws, coef_bootstraps, cov_bootstraps + ) return coef_draws - def _bootstrap_samples_of_smoothing(self, X, y, weights=None, - n_bootstraps=1, objective='auto'): + def _bootstrap_samples_of_smoothing( + self, X, y, weights=None, n_bootstraps=1, objective='auto' + ): """Sample the smoothing parameters using simulated response data. @@ -2144,8 +2310,7 @@ def _bootstrap_samples_of_smoothing(self, X, y, weights=None, """ mu = self.predict_mu(X) # Wood pg. 198 step 1 coef_bootstraps = [self.coef_] - cov_bootstraps = [ - load_diagonal(self.statistics_['cov'])] + cov_bootstraps = [load_diagonal(self.statistics_['cov'])] for _ in range(n_bootstraps - 1): # Wood pg. 198 step 2 # generate response data from fitted model (Wood pg. 198 step 3) @@ -2166,8 +2331,9 @@ def _bootstrap_samples_of_smoothing(self, X, y, weights=None, # with all values in [1e-3, 1e3] lam_grid = np.random.randn(11, len(flatten(self.lam))) * 6 - 3 lam_grid = np.exp(lam_grid) - gam.gridsearch(X, y_bootstrap, weights=weights, lam=lam_grid, - objective=objective) + gam.gridsearch( + X, y_bootstrap, weights=weights, lam=lam_grid, objective=objective + ) lam = gam.lam # fit coefficients on the original data given the smoothing params @@ -2184,13 +2350,13 @@ def _bootstrap_samples_of_smoothing(self, X, y, weights=None, cov_bootstraps.append(cov) return coef_bootstraps, cov_bootstraps - def _simulate_coef_from_bootstraps( - self, n_draws, coef_bootstraps, cov_bootstraps): + def _simulate_coef_from_bootstraps(self, n_draws, coef_bootstraps, cov_bootstraps): """Simulate coefficients using bootstrap samples.""" # Sample indices uniformly from {0, ..., n_bootstraps - 1} # (Wood pg. 
199 step 6) random_bootstrap_indices = np.random.choice( - np.arange(len(coef_bootstraps)), size=n_draws, replace=True) + np.arange(len(coef_bootstraps)), size=n_draws, replace=True + ) # Simulate `n_draws` many random coefficient vectors from a # multivariate normal distribution with mean and covariance given by @@ -2208,8 +2374,10 @@ def _simulate_coef_from_bootstraps( for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items(): coef_draws[draw_indices] = np.random.multivariate_normal( - coef_bootstraps[bootstrap], cov_bootstraps[bootstrap], - size=len(draw_indices)) + coef_bootstraps[bootstrap], + cov_bootstraps[bootstrap], + size=len(draw_indices), + ) return coef_draws @@ -2278,18 +2446,29 @@ class LinearGAM(GAM): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - scale=None, callbacks=['deviance', 'diffs'], - fit_intercept=True, verbose=False, **kwargs): + + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + scale=None, + callbacks=['deviance', 'diffs'], + fit_intercept=True, + verbose=False, + **kwargs, + ): self.scale = scale - super(LinearGAM, self).__init__(terms=terms, - distribution=NormalDist(scale=self.scale), - link='identity', - max_iter=max_iter, - tol=tol, - fit_intercept=fit_intercept, - verbose=verbose, - **kwargs) + super(LinearGAM, self).__init__( + terms=terms, + distribution=NormalDist(scale=self.scale), + link='identity', + max_iter=max_iter, + tol=tol, + fit_intercept=fit_intercept, + verbose=verbose, + **kwargs, + ) self._exclude += ['distribution', 'link'] @@ -2308,7 +2487,7 @@ def _validate_params(self): self.distribution = NormalDist(scale=self.scale) super(LinearGAM, self)._validate_params() - def prediction_intervals(self, X, width=.95, quantiles=None): + def prediction_intervals(self, X, width=0.95, quantiles=None): """ estimate prediction intervals for LinearGAM @@ -2328,12 +2507,18 @@ def prediction_intervals(self, X, width=.95, quantiles=None): if not self._is_fitted: raise AttributeError('GAM has not been fitted. 
Call fit first.') - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) return self._get_quantiles(X, width, quantiles, prediction=True) + class LogisticGAM(GAM): """Logistic GAM @@ -2398,20 +2583,29 @@ class LogisticGAM(GAM): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - callbacks=['deviance', 'diffs', 'accuracy'], - fit_intercept=True, verbose=False, **kwargs): + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + callbacks=['deviance', 'diffs', 'accuracy'], + fit_intercept=True, + verbose=False, + **kwargs, + ): # call super - super(LogisticGAM, self).__init__(terms=terms, - distribution='binomial', - link='logit', - max_iter=max_iter, - tol=tol, - callbacks=callbacks, - fit_intercept=fit_intercept, - verbose=verbose, - **kwargs) + super(LogisticGAM, self).__init__( + terms=terms, + distribution='binomial', + link='logit', + max_iter=max_iter, + tol=tol, + callbacks=callbacks, + fit_intercept=fit_intercept, + verbose=verbose, + **kwargs, + ) # ignore any variables self._exclude += ['distribution', 'link'] @@ -2439,9 +2633,14 @@ def accuracy(self, X=None, y=None, mu=None): y = check_y(y, self.link, self.distribution, verbose=self.verbose) if X is not None: - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) if mu is None: mu = self.predict_mu(X) @@ -2449,8 +2648,7 @@ def accuracy(self, X=None, y=None, mu=None): return ((mu > 0.5).astype(int) == y).mean() def score(self, X, y): - """ - method to compute the accuracy for a trained model for a given X data and y labels + """compute the accuracy for a trained model for given X data and y labels Parameters ---------- @@ -2564,20 +2762,29 @@ class PoissonGAM(GAM): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - callbacks=['deviance', 'diffs'], - fit_intercept=True, verbose=False, **kwargs): + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + callbacks=['deviance', 'diffs'], + fit_intercept=True, + verbose=False, + **kwargs, + ): # call super - super(PoissonGAM, self).__init__(terms=terms, - distribution='poisson', - link='log', - max_iter=max_iter, - tol=tol, - callbacks=callbacks, - fit_intercept=fit_intercept, - verbose=verbose, - **kwargs) + super(PoissonGAM, self).__init__( + terms=terms, + distribution='poisson', + link='log', + max_iter=max_iter, + tol=tol, + callbacks=callbacks, + fit_intercept=fit_intercept, + verbose=verbose, + **kwargs, + ) # ignore any variables self._exclude += ['distribution', 'link'] @@ -2634,8 +2841,9 @@ def loglikelihood(self, X, y, exposure=None, weights=None): if weights is not None: weights = np.array(weights).astype('f').ravel() - weights = check_array(weights, name='sample weights', - ndim=1, verbose=self.verbose) + weights = check_array( + weights, 
name='sample weights', ndim=1, verbose=self.verbose + ) check_lengths(y, weights) else: weights = np.ones_like(y).astype('float64') @@ -2668,8 +2876,9 @@ def _exposure_to_weights(self, y, exposure=None, weights=None): if exposure is not None: exposure = np.array(exposure).astype('f').ravel() - exposure = check_array(exposure, name='sample exposure', - ndim=1, verbose=self.verbose) + exposure = check_array( + exposure, name='sample exposure', ndim=1, verbose=self.verbose + ) else: exposure = np.ones_like(y.ravel()).astype('float64') @@ -2682,8 +2891,9 @@ def _exposure_to_weights(self, y, exposure=None, weights=None): if weights is not None: weights = np.array(weights).astype('f').ravel() - weights = check_array(weights, name='sample weights', - ndim=1, verbose=self.verbose) + weights = check_array( + weights, name='sample weights', ndim=1, verbose=self.verbose + ) else: weights = np.ones_like(y).astype('float64') check_lengths(weights, exposure) @@ -2747,9 +2957,14 @@ def predict(self, X, exposure=None): if not self._is_fitted: raise AttributeError('GAM has not been fitted. Call fit first.') - X = check_X(X, n_feats=self.statistics_['m_features'], - edge_knots=self.edge_knots_, dtypes=self.dtype, - features=self.feature, verbose=self.verbose) + X = check_X( + X, + n_feats=self.statistics_['m_features'], + edge_knots=self.edge_knots_, + dtypes=self.dtype, + features=self.feature, + verbose=self.verbose, + ) if exposure is not None: exposure = np.array(exposure).astype('f') @@ -2759,9 +2974,17 @@ def predict(self, X, exposure=None): return self.predict_mu(X) * exposure - def gridsearch(self, X, y, exposure=None, weights=None, - return_scores=False, keep_best=True, objective='auto', - **param_grids): + def gridsearch( + self, + X, + y, + exposure=None, + weights=None, + return_scores=False, + keep_best=True, + objective='auto', + **param_grids, + ): """ performs a grid search over a space of parameters for a given objective @@ -2828,12 +3051,15 @@ def gridsearch(self, X, y, exposure=None, weights=None, self, ie possibly the newly fitted model """ y, weights = self._exposure_to_weights(y, exposure, weights) - return super(PoissonGAM, self).gridsearch(X, y, - weights=weights, - return_scores=return_scores, - keep_best=keep_best, - objective=objective, - **param_grids) + return super(PoissonGAM, self).gridsearch( + X, + y, + weights=weights, + return_scores=return_scores, + keep_best=keep_best, + objective=objective, + **param_grids, + ) class GammaGAM(GAM): @@ -2912,19 +3138,30 @@ class GammaGAM(GAM): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - scale=None, callbacks=['deviance', 'diffs'], - fit_intercept=True, verbose=False, **kwargs): + + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + scale=None, + callbacks=['deviance', 'diffs'], + fit_intercept=True, + verbose=False, + **kwargs, + ): self.scale = scale - super(GammaGAM, self).__init__(terms=terms, - distribution=GammaDist(scale=self.scale), - link='log', - max_iter=max_iter, - tol=tol, - callbacks=callbacks, - fit_intercept=fit_intercept, - verbose=verbose, - **kwargs) + super(GammaGAM, self).__init__( + terms=terms, + distribution=GammaDist(scale=self.scale), + link='log', + max_iter=max_iter, + tol=tol, + callbacks=callbacks, + fit_intercept=fit_intercept, + verbose=verbose, + **kwargs, + ) self._exclude += ['distribution', 'link'] @@ -2950,10 +3187,10 @@ class 
InvGaussGAM(GAM): This is a GAM with a Inverse Gaussian error distribution, and a log link. NB - Although canonical link function for the Inverse Gaussian GLM is the inverse squared link, - this function can create problems for numerical software because it becomes - difficult to enforce the requirement that the mean of the Inverse Gaussian distribution - be positive. The log link guarantees this. + Although canonical link function for the Inverse Gaussian GLM is the inverse squared + link, this function can create problems for numerical software because it becomes + difficult to enforce the requirement that the mean of the Inverse Gaussian + distribution be positive. The log link guarantees this. If you need to use the inverse squared link function, simply construct a custom GAM: @@ -3020,19 +3257,30 @@ class InvGaussGAM(GAM): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - scale=None, callbacks=['deviance', 'diffs'], - fit_intercept=True, verbose=False, **kwargs): + + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + scale=None, + callbacks=['deviance', 'diffs'], + fit_intercept=True, + verbose=False, + **kwargs, + ): self.scale = scale - super(InvGaussGAM, self).__init__(terms=terms, - distribution=InvGaussDist(scale=self.scale), - link='log', - max_iter=max_iter, - tol=tol, - callbacks=callbacks, - fit_intercept=fit_intercept, - verbose=verbose, - **kwargs) + super(InvGaussGAM, self).__init__( + terms=terms, + distribution=InvGaussDist(scale=self.scale), + link='log', + max_iter=max_iter, + tol=tol, + callbacks=callbacks, + fit_intercept=fit_intercept, + verbose=verbose, + **kwargs, + ) self._exclude += ['distribution', 'link'] @@ -3118,20 +3366,32 @@ class ExpectileGAM(GAM): International Biometric Society: A Crash Course on P-splines http://www.ibschannel2015.nl/project/userfiles/Crash_course_handout.pdf """ - def __init__(self, terms='auto', max_iter=100, tol=1e-4, - scale=None, callbacks=['deviance', 'diffs'], - fit_intercept=True, expectile=0.5, verbose=False, **kwargs): + + def __init__( + self, + terms='auto', + max_iter=100, + tol=1e-4, + scale=None, + callbacks=['deviance', 'diffs'], + fit_intercept=True, + expectile=0.5, + verbose=False, + **kwargs, + ): self.scale = scale self.expectile = expectile - super(ExpectileGAM, self).__init__(terms=terms, - distribution=NormalDist(scale=self.scale), - link='identity', - max_iter=max_iter, - tol=tol, - callbacks=callbacks, - fit_intercept=fit_intercept, - verbose=verbose, - **kwargs) + super(ExpectileGAM, self).__init__( + terms=terms, + distribution=NormalDist(scale=self.scale), + link='identity', + max_iter=max_iter, + tol=tol, + callbacks=callbacks, + fit_intercept=fit_intercept, + verbose=verbose, + **kwargs, + ) self._exclude += ['distribution', 'link'] @@ -3148,7 +3408,9 @@ def _validate_params(self): None """ if self.expectile >= 1 or self.expectile <= 0: - raise ValueError('expectile must be in (0,1), but found {}'.format(self.expectile)) + raise ValueError( + 'expectile must be in (0,1), but found {}'.format(self.expectile) + ) self.distribution = NormalDist(scale=self.scale) super(ExpectileGAM, self)._validate_params() @@ -3162,8 +3424,8 @@ def _W(self, mu, weights, y=None): this makes me think that they are equivalent. - also, using non-sqrt mu with stable opt gives very small edofs for even lam=0.001 - and the parameter variance is huge. 
this seems strange to me. + also, using non-sqrt mu with stable opt gives very small edofs + for even lam=0.001 and the parameter variance is huge. this seems strange to me. computed [V * d(link)/d(mu)] ^(-1/2) by hand and the math checks out as hoped. @@ -3185,9 +3447,15 @@ def _W(self, mu, weights, y=None): # asymmetric weight asym = (y > mu) * self.expectile + (y <= mu) * (1 - self.expectile) - return sp.sparse.diags((self.link.gradient(mu, self.distribution)**2 * - self.distribution.V(mu=mu) * - weights ** -1)**-0.5 * asym**0.5) + return sp.sparse.diags( + ( + self.link.gradient(mu, self.distribution) ** 2 + * self.distribution.V(mu=mu) + * weights**-1 + ) + ** -0.5 + * asym**0.5 + ) def _get_quantile_ratio(self, X, y): """find the expirical quantile of the model @@ -3235,12 +3503,15 @@ def fit_quantile(self, X, y, quantile, max_iter=20, tol=0.01, weights=None): ------- self : fitted GAM object """ + def _within_tol(a, b, tol): return np.abs(a - b) <= tol # validate arguments if quantile <= 0 or quantile >= 1: - raise ValueError('quantile must be on (0, 1), but found {}'.format(quantile)) + raise ValueError( + 'quantile must be on (0, 1), but found {}'.format(quantile) + ) if tol <= 0: raise ValueError('tol must be float > 0 {}'.format(tol)) @@ -3267,7 +3538,7 @@ def _within_tol(a, b, tol): else: max_ = self.expectile - expectile = (max_ + min_) / 2. + expectile = (max_ + min_) / 2.0 self.set_params(expectile=expectile) self.fit(X, y, weights=weights) diff --git a/pygam/terms.py b/pygam/terms.py index 117584b4..8d692964 100644 --- a/pygam/terms.py +++ b/pygam/terms.py @@ -12,16 +12,31 @@ import scipy as sp from pygam.core import Core, nice_repr -from pygam.utils import isiterable, check_param, flatten, gen_edge_knots, b_spline_basis, tensor_product +from pygam.utils import ( + isiterable, + check_param, + flatten, + gen_edge_knots, + b_spline_basis, + tensor_product, +) from pygam.penalties import PENALTIES, CONSTRAINTS class Term(Core): __metaclass__ = ABCMeta - def __init__(self, feature, lam=0.6, dtype='numerical', - fit_linear=False, fit_splines=True, - penalties='auto', constraints=None, - verbose=False): + + def __init__( + self, + feature, + lam=0.6, + dtype='numerical', + fit_linear=False, + fit_splines=True, + penalties='auto', + constraints=None, + verbose=False, + ): """creates an instance of a Term Parameters @@ -92,7 +107,7 @@ def __init__(self, feature, lam=0.6, dtype='numerical', self.constraints = constraints self.verbose = verbose - if not(hasattr(self, '_name')): + if not (hasattr(self, '_name')): self._name = 'term' super(Term, self).__init__(name=self._name) @@ -123,10 +138,14 @@ def __repr__(self): features = [] if self.feature is None else self.feature features = np.atleast_1d(features).tolist() - return nice_repr(name, {}, - line_width=self._line_width, - line_offset=self._line_offset, - decimals=4, args=features) + return nice_repr( + name, + {}, + line_width=self._line_width, + line_offset=self._line_offset, + decimals=4, + args=features, + ) def _validate_arguments(self): """method to sanitize model parameters @@ -141,25 +160,32 @@ def _validate_arguments(self): """ # dtype if self.dtype not in ['numerical', 'categorical']: - raise ValueError("dtype must be in ['numerical','categorical'], "\ - "but found dtype = {}".format(self.dtype)) + raise ValueError( + "dtype must be in ['numerical','categorical'], " + "but found dtype = {}".format(self.dtype) + ) # fit_linear XOR fit_splines if self.fit_linear == self.fit_splines: - raise ValueError('term must have 
fit_linear XOR fit_splines, but found: ' - 'fit_linear= {}, fit_splines={}'.format(self.fit_linear, self.fit_splines)) + raise ValueError( + 'term must have fit_linear XOR fit_splines, but found: ' + 'fit_linear= {}, fit_splines={}'.format( + self.fit_linear, self.fit_splines + ) + ) # penalties if not isiterable(self.penalties): self.penalties = [self.penalties] for i, p in enumerate(self.penalties): - if not (hasattr(p, '__call__') or - (p in PENALTIES) or - (p is None)): - raise ValueError("penalties must be callable or in "\ - "{}, but found {} for {}th penalty"\ - .format(list(PENALTIES.keys()), p, i)) + if not (hasattr(p, '__call__') or (p in PENALTIES) or (p is None)): + raise ValueError( + "penalties must be callable or in " + "{}, but found {} for {}th penalty".format( + list(PENALTIES.keys()), p, i + ) + ) # check lams and distribute to penalites if not isiterable(self.lam): @@ -172,20 +198,23 @@ def _validate_arguments(self): self.lam = self.lam * len(self.penalties) if len(self.lam) != len(self.penalties): - raise ValueError('expected 1 lam per penalty, but found '\ - 'lam = {}, penalties = {}'.format(self.lam, self.penalties)) + raise ValueError( + 'expected 1 lam per penalty, but found ' + 'lam = {}, penalties = {}'.format(self.lam, self.penalties) + ) # constraints if not isiterable(self.constraints): self.constraints = [self.constraints] for i, c in enumerate(self.constraints): - if not (hasattr(c, '__call__') or - (c in CONSTRAINTS) or - (c is None)): - raise ValueError("constraints must be callable or in "\ - "{}, but found {} for {}th constraint"\ - .format(list(CONSTRAINTS.keys()), c, i)) + if not (hasattr(c, '__call__') or (c in CONSTRAINTS) or (c is None)): + raise ValueError( + "constraints must be callable or in " + "{}, but found {} for {}th constraint".format( + list(CONSTRAINTS.keys()), c, i + ) + ) return self @@ -239,15 +268,13 @@ def build_from_info(cls, info): @property def hasconstraint(self): - """bool, whether the term has any constraints - """ + """bool, whether the term has any constraints""" return np.not_equal(np.atleast_1d(self.constraints), None).any() @property @abstractproperty def n_coefs(self): - """Number of coefficients contributed by the term to the model - """ + """Number of coefficients contributed by the term to the model""" pass @abstractmethod @@ -306,7 +333,7 @@ def build_penalties(self, verbose=False): P : sparse CSC matrix containing the model penalties in quadratic form """ if self.isintercept: - return np.array([[0.]]) + return np.array([[0.0]]) Ps = [] for penalty, lam in zip(self.penalties, self.lam): @@ -326,7 +353,7 @@ def build_penalties(self, verbose=False): if penalty in PENALTIES: penalty = PENALTIES[penalty] - P = penalty(self.n_coefs, coef=None) # penalties dont need coef + P = penalty(self.n_coefs, coef=None) # penalties dont need coef Ps.append(np.multiply(P, lam)) return np.sum(Ps) @@ -357,11 +384,10 @@ def build_constraints(self, coef, constraint_lam, constraint_l2): C : sparse CSC matrix containing the model constraints in quadratic form """ if self.isintercept: - return np.array([[0.]]) + return np.array([[0.0]]) Cs = [] for constraint in self.constraints: - if constraint is None: constraint = 'none' if constraint in CONSTRAINTS: @@ -378,6 +404,7 @@ def build_constraints(self, coef, constraint_lam, constraint_l2): return Cs + class Intercept(Term): def __init__(self, verbose=False): """creates an instance of an Intercept term @@ -405,9 +432,25 @@ def __init__(self, verbose=False): self._name = 'intercept_term' 
self._minimal_name = 'intercept' - super(Intercept, self).__init__(feature=None, fit_linear=False, fit_splines=False, lam=None, penalties=None, constraints=None, verbose=verbose) - - self._exclude += ['fit_splines', 'fit_linear', 'lam', 'penalties', 'constraints', 'feature', 'dtype'] + super(Intercept, self).__init__( + feature=None, + fit_linear=False, + fit_splines=False, + lam=None, + penalties=None, + constraints=None, + verbose=verbose, + ) + + self._exclude += [ + 'fit_splines', + 'fit_linear', + 'lam', + 'penalties', + 'constraints', + 'feature', + 'dtype', + ] self._args = [] def __repr__(self): @@ -428,8 +471,7 @@ def _validate_arguments(self): @property def n_coefs(self): - """Number of coefficients contributed by the term to the model - """ + """Number of coefficients contributed by the term to the model""" return 1 def compile(self, X, verbose=False): @@ -515,17 +557,21 @@ def __init__(self, feature, lam=0.6, penalties='auto', verbose=False): """ self._name = 'linear_term' self._minimal_name = 'l' - super(LinearTerm, self).__init__(feature=feature, lam=lam, - penalties=penalties, - constraints=None, dtype='numerical', - fit_linear=True, fit_splines=False, - verbose=verbose) + super(LinearTerm, self).__init__( + feature=feature, + lam=lam, + penalties=penalties, + constraints=None, + dtype='numerical', + fit_linear=True, + fit_splines=False, + verbose=verbose, + ) self._exclude += ['fit_splines', 'fit_linear', 'dtype', 'constraints'] @property def n_coefs(self): - """Number of coefficients contributed by the term to the model - """ + """Number of coefficients contributed by the term to the model""" return 1 def compile(self, X, verbose=False): @@ -544,13 +590,14 @@ def compile(self, X, verbose=False): None """ if self.feature >= X.shape[1]: - raise ValueError('term requires feature {}, '\ - 'but X has only {} dimensions'\ - .format(self.feature, X.shape[1])) - - self.edge_knots_ = gen_edge_knots(X[:, self.feature], - self.dtype, - verbose=verbose) + raise ValueError( + 'term requires feature {}, ' + 'but X has only {} dimensions'.format(self.feature, X.shape[1]) + ) + + self.edge_knots_ = gen_edge_knots( + X[:, self.feature], self.dtype, verbose=verbose + ) return self def build_columns(self, X, verbose=False): @@ -573,9 +620,21 @@ def build_columns(self, X, verbose=False): class SplineTerm(Term): _bases = ['ps', 'cp'] - def __init__(self, feature, n_splines=20, spline_order=3, lam=0.6, - penalties='auto', constraints=None, dtype='numerical', - basis='ps', by=None, edge_knots=None, verbose=False): + + def __init__( + self, + feature, + n_splines=20, + spline_order=3, + lam=0.6, + penalties='auto', + constraints=None, + dtype='numerical', + basis='ps', + by=None, + edge_knots=None, + verbose=False, + ): """creates an instance of a SplineTerm Parameters @@ -625,7 +684,7 @@ def __init__(self, feature, n_splines=20, spline_order=3, lam=0.6, Type of basis function to use in the term. 'ps' : p-spline basis - + 'cp' : cyclic p-spline basis, useful for building periodic functions. by default, the maximum and minimum of the feature values are used to determine the function's period. 
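Illustrative: building a periodic smooth with the cyclic p-spline basis described below, for an angle-like or hour-of-day feature in column 0 (placeholder `X`, `y`; assumes pyGAM's `s` term factory):

    from pygam import LinearGAM, s

    # basis='cp' forces the smooth to wrap around, so the fitted function
    # takes the same value and slope at both ends of the feature's range
    gam = LinearGAM(s(0, basis='cp')).fit(X, y)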
@@ -676,14 +735,16 @@ def __init__(self, feature, n_splines=20, spline_order=3, lam=0.6, if edge_knots is not None: self.edge_knots_ = edge_knots - super(SplineTerm, self).__init__(feature=feature, - lam=lam, - penalties=penalties, - constraints=constraints, - fit_linear=False, - fit_splines=True, - dtype=dtype, - verbose=verbose) + super(SplineTerm, self).__init__( + feature=feature, + lam=lam, + penalties=penalties, + constraints=constraints, + fit_linear=False, + fit_splines=True, + dtype=dtype, + verbose=verbose, + ) self._exclude += ['fit_linear', 'fit_splines'] @@ -701,36 +762,41 @@ def _validate_arguments(self): super(SplineTerm, self)._validate_arguments() if self.basis not in self._bases: - raise ValueError("basis must be one of {}, "\ - "but found: {}".format(self._bases, self.basis)) + raise ValueError( + "basis must be one of {}, " + "but found: {}".format(self._bases, self.basis) + ) # n_splines - self.n_splines = check_param(self.n_splines, param_name='n_splines', - dtype='int', constraint='>= 0') + self.n_splines = check_param( + self.n_splines, param_name='n_splines', dtype='int', constraint='>= 0' + ) # spline_order - self.spline_order = check_param(self.spline_order, - param_name='spline_order', - dtype='int', constraint='>= 0') + self.spline_order = check_param( + self.spline_order, param_name='spline_order', dtype='int', constraint='>= 0' + ) # n_splines + spline_order if not self.n_splines > self.spline_order: - raise ValueError('n_splines must be > spline_order. '\ - 'found: n_splines = {} and spline_order = {}'\ - .format(self.n_splines, self.spline_order)) + raise ValueError( + 'n_splines must be > spline_order. ' + 'found: n_splines = {} and spline_order = {}'.format( + self.n_splines, self.spline_order + ) + ) # by if self.by is not None: - self.by = check_param(self.by, - param_name='by', - dtype='int', constraint='>= 0') + self.by = check_param( + self.by, param_name='by', dtype='int', constraint='>= 0' + ) return self @property def n_coefs(self): - """Number of coefficients contributed by the term to the model - """ + """Number of coefficients contributed by the term to the model""" return self.n_splines def compile(self, X, verbose=False): @@ -749,19 +815,21 @@ def compile(self, X, verbose=False): None """ if self.feature >= X.shape[1]: - raise ValueError('term requires feature {}, '\ - 'but X has only {} dimensions'\ - .format(self.feature, X.shape[1])) + raise ValueError( + 'term requires feature {}, ' + 'but X has only {} dimensions'.format(self.feature, X.shape[1]) + ) if self.by is not None and self.by >= X.shape[1]: - raise ValueError('by variable requires feature {}, '\ - 'but X has only {} dimensions'\ - .format(self.by, X.shape[1])) + raise ValueError( + 'by variable requires feature {}, ' + 'but X has only {} dimensions'.format(self.by, X.shape[1]) + ) if not hasattr(self, 'edge_knots_'): - self.edge_knots_ = gen_edge_knots(X[:, self.feature], - self.dtype, - verbose=verbose) + self.edge_knots_ = gen_edge_knots( + X[:, self.feature], self.dtype, verbose=verbose + ) return self def build_columns(self, X, verbose=False): @@ -781,13 +849,15 @@ def build_columns(self, X, verbose=False): """ X[:, self.feature][:, np.newaxis] - splines = b_spline_basis(X[:, self.feature], - edge_knots=self.edge_knots_, - spline_order=self.spline_order, - n_splines=self.n_splines, - sparse=True, - periodic=self.basis in ['cp'], - verbose=verbose) + splines = b_spline_basis( + X[:, self.feature], + edge_knots=self.edge_knots_, + spline_order=self.spline_order, + 
n_splines=self.n_splines, + sparse=True, + periodic=self.basis in ['cp'], + verbose=verbose, + ) if self.by is not None: splines = splines.multiply(X[:, self.by][:, np.newaxis]) @@ -797,7 +867,10 @@ def build_columns(self, X, verbose=False): class FactorTerm(SplineTerm): _encodings = ['one-hot', 'dummy'] - def __init__(self, feature, lam=0.6, penalties='auto', coding='one-hot', verbose=False): + + def __init__( + self, feature, lam=0.6, penalties='auto', coding='one-hot', verbose=False + ): """creates an instance of a FactorTerm Parameters @@ -847,17 +920,26 @@ def __init__(self, feature, lam=0.6, penalties='auto', coding='one-hot', verbose contains dict with the sufficient information to duplicate the term """ self.coding = coding - super(FactorTerm, self).__init__(feature=feature, - lam=lam, - dtype='categorical', - spline_order=0, - penalties=penalties, - by=None, - constraints=None, - verbose=verbose) + super(FactorTerm, self).__init__( + feature=feature, + lam=lam, + dtype='categorical', + spline_order=0, + penalties=penalties, + by=None, + constraints=None, + verbose=verbose, + ) self._name = 'factor_term' self._minimal_name = 'f' - self._exclude += ['dtype', 'spline_order', 'by', 'n_splines', 'basis', 'constraints'] + self._exclude += [ + 'dtype', + 'spline_order', + 'by', + 'n_splines', + 'basis', + 'constraints', + ] def _validate_arguments(self): """method to sanitize model parameters @@ -872,12 +954,13 @@ def _validate_arguments(self): """ super(FactorTerm, self)._validate_arguments() if self.coding not in self._encodings: - raise ValueError("coding must be one of {}, "\ - "but found: {}".format(self._encodings, self.coding)) + raise ValueError( + "coding must be one of {}, " + "but found: {}".format(self._encodings, self.coding) + ) return self - def compile(self, X, verbose=False): """method to validate and prepare data-dependent parameters @@ -896,9 +979,9 @@ def compile(self, X, verbose=False): super(FactorTerm, self).compile(X) self.n_splines = len(np.unique(X[:, self.feature])) - self.edge_knots_ = gen_edge_knots(X[:, self.feature], - self.dtype, - verbose=verbose) + self.edge_knots_ = gen_edge_knots( + X[:, self.feature], self.dtype, verbose=verbose + ) return self def build_columns(self, X, verbose=False): @@ -924,24 +1007,24 @@ def build_columns(self, X, verbose=False): @property def n_coefs(self): - """Number of coefficients contributed by the term to the model - """ + """Number of coefficients contributed by the term to the model""" return self.n_splines - 1 * (self.coding in ['dummy']) + class MetaTermMixin(object): _plural = [ - 'feature', - 'dtype', - 'fit_linear', - 'fit_splines', - 'lam', - 'n_splines', - 'spline_order', - 'constraints', - 'penalties', - 'basis', - 'edge_knots_' - ] + 'feature', + 'dtype', + 'fit_linear', + 'fit_splines', + 'lam', + 'n_splines', + 'spline_order', + 'constraints', + 'penalties', + 'basis', + 'edge_knots_', + ] _term_location = '_terms' def _super_get(self, name): @@ -955,13 +1038,14 @@ def _super_has(self, name): return False def _has_terms(self): - """bool, whether the instance has any sub-terms - """ + """bool, whether the instance has any sub-terms""" loc = self._super_get('_term_location') - return self._super_has(loc) \ - and isiterable(self._super_get(loc)) \ - and len(self._super_get(loc)) > 0 \ - and all([isinstance(term, Term) for term in self._super_get(loc)]) + return ( + self._super_has(loc) + and isiterable(self._super_get(loc)) + and len(self._super_get(loc)) > 0 + and all([isinstance(term, Term) for term in 
self._super_get(loc)]) + ) def _get_terms(self): """get the terms in the instance @@ -986,14 +1070,16 @@ def __setattr__(self, name, value): if isiterable(value): value = flatten(value) if len(value) != size: - raise ValueError('Expected {} to have length {}, but found {} = {}'\ - .format(name, size, name, value)) + raise ValueError( + 'Expected {} to have length {}, but found {} = {}'.format( + name, size, name, value + ) + ) else: value = [value] * size # now set each term's sequence of arguments for term in self._get_terms()[::-1]: - # skip intercept if term.isintercept: continue @@ -1012,11 +1098,9 @@ def __setattr__(self, name, value): def __getattr__(self, name): if self._has_terms() and name in self._super_get('_plural'): - # collect value from each term values = [] for term in self._get_terms(): - # skip the intercept if term.isintercept: continue @@ -1028,7 +1112,7 @@ def __getattr__(self, name): class TensorTerm(SplineTerm, MetaTermMixin): - _N_SPLINES = 10 # default num splines + _N_SPLINES = 10 # default num splines def __init__(self, *args, **kwargs): """creates an instance of a TensorTerm @@ -1126,16 +1210,16 @@ def __init__(self, *args, **kwargs): self._minimal_name = 'te' self._exclude = [ - 'feature', - 'dtype', - 'fit_linear', - 'fit_splines', - 'lam', - 'n_splines', - 'spline_order', - 'constraints', - 'penalties', - 'basis', + 'feature', + 'dtype', + 'fit_linear', + 'fit_splines', + 'lam', + 'n_splines', + 'spline_order', + 'constraints', + 'penalties', + 'basis', ] for param in self._exclude: delattr(self, param) @@ -1150,20 +1234,27 @@ def _parse_terms(self, args, **kwargs): for k, v in kwargs.items(): if isiterable(v): if len(v) != m: - raise ValueError('Expected {} to have length {}, but found {} = {}'\ - .format(k, m, k, v)) + raise ValueError( + 'Expected {} to have length {}, but found {} = {}'.format( + k, m, k, v + ) + ) else: kwargs[k] = [v] * m terms = [] for i, arg in enumerate(np.atleast_1d(args)): if isinstance(arg, TensorTerm): - raise ValueError('TensorTerm does not accept other TensorTerms. '\ - 'Please build a flat TensorTerm instead of a nested one.') + raise ValueError( + 'TensorTerm does not accept other TensorTerms. ' + 'Please build a flat TensorTerm instead of a nested one.' 
+ ) if isinstance(arg, Term): if self.verbose and kwargs: - warnings.warn('kwargs are skipped when Term instances are passed to TensorTerm constructor') + warnings.warn( + 'kwargs are skipped when Term instances are passed to TensorTerm constructor' # noqa: E501 + ) terms.append(arg) continue @@ -1178,7 +1269,7 @@ def __len__(self): return len(self._terms) def __getitem__(self, i): - return self._terms[i] + return self._terms[i] def _validate_arguments(self): """method to sanitize model parameters @@ -1210,7 +1301,7 @@ def info(self): dict containing information to duplicate this term """ info = super(TensorTerm, self).info - info.update({'terms':[term.info for term in self._terms]}) + info.update({'terms': [term.info for term in self._terms]}) return info @classmethod @@ -1235,8 +1326,7 @@ def build_from_info(cls, info): @property def hasconstraint(self): - """bool, whether the term has any constraints - """ + """bool, whether the term has any constraints""" constrained = False for term in self._terms: constrained = constrained or term.hasconstraint @@ -1244,8 +1334,7 @@ def hasconstraint(self): @property def n_coefs(self): - """Number of coefficients contributed by the term to the model - """ + """Number of coefficients contributed by the term to the model""" return np.prod([term.n_coefs for term in self._terms]) def compile(self, X, verbose=False): @@ -1267,9 +1356,10 @@ def compile(self, X, verbose=False): term.compile(X, verbose=False) if self.by is not None and self.by >= X.shape[1]: - raise ValueError('by variable requires feature {}, '\ - 'but X has only {} dimensions'\ - .format(self.by, X.shape[1])) + raise ValueError( + 'by variable requires feature {}, ' + 'but X has only {} dimensions'.format(self.by, X.shape[1]) + ) return self def build_columns(self, X, verbose=False): @@ -1363,7 +1453,9 @@ def build_constraints(self, coef, constraint_lam, constraint_l2): """ C = sp.sparse.csc_matrix(np.zeros((self.n_coefs, self.n_coefs))) for i in range(len(self._terms)): - C += self._build_marginal_constraints(i, coef, constraint_lam, constraint_l2) + C += self._build_marginal_constraints( + i, coef, constraint_lam, constraint_l2 + ) return sp.sparse.csc_matrix(C) @@ -1404,7 +1496,9 @@ def _build_marginal_constraints(self, i, coef, constraint_lam, constraint_l2): coef_slice = coef[slice_] # build the constraint matrix for that slice - slice_C = self._terms[i].build_constraints(coef_slice, constraint_lam, constraint_l2) + slice_C = self._terms[i].build_constraints( + coef_slice, constraint_lam, constraint_l2 + ) # now enter it into the composite composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.A @@ -1435,7 +1529,9 @@ def _iterate_marginal_coef_slices(self, i): idxs = idxs.reshape(dims) # reshape to a 2d matrix, where we can loop over rows - idxs = np.moveaxis(idxs, i, 0).reshape(idxs.shape[i], int(idxs.size/idxs.shape[i])) + idxs = np.moveaxis(idxs, i, 0).reshape( + idxs.shape[i], int(idxs.size / idxs.shape[i]) + ) # loop over rows for slice_ in idxs.T: @@ -1444,6 +1540,7 @@ def _iterate_marginal_coef_slices(self, i): class TermList(Core, MetaTermMixin): _terms = [] + def __init__(self, *terms, **kwargs): """creates an instance of a TermList @@ -1512,21 +1609,23 @@ def deduplicate(term, term_list, uniques_dict): for term_ in term._terms: term_list = deduplicate(term_, term_list, uniques) else: - raise ValueError('terms must be instances of Term or TermList, '\ - 'but found term: {}'.format(term)) + raise ValueError( + 'terms must be instances of Term or TermList, ' + 'but found 
term: {}'.format(term) + ) self._terms = self._terms + term_list self._exclude = [ - 'feature', - 'dtype', - 'fit_linear', - 'fit_splines', - 'lam', - 'n_splines', - 'spline_order', - 'constraints', - 'penalties', - 'basis', + 'feature', + 'dtype', + 'fit_linear', + 'fit_splines', + 'lam', + 'n_splines', + 'spline_order', + 'constraints', + 'penalties', + 'basis', ] self.verbose = any([term.verbose for term in self._terms]) or self.verbose @@ -1580,7 +1679,7 @@ def info(self): dict containing information to duplicate the term list """ info = {'term_type': 'term_list', 'verbose': self.verbose} - info.update({'terms':[term.info for term in self._terms]}) + info.update({'terms': [term.info for term in self._terms]}) return info @classmethod @@ -1643,21 +1742,23 @@ def pop(self, i=None): ------- term : Term """ - if i == None: + if i is None: i = len(self) - 1 if i >= len(self._terms) or i < 0: - raise ValueError('requested pop {}th term, but found only {} terms'\ - .format(i, len(self._terms))) + raise ValueError( + 'requested pop {}th term, but found only {} terms'.format( + i, len(self._terms) + ) + ) term = self._terms[i] - self._terms = self._terms[:i] + self._terms[i+1:] + self._terms = self._terms[:i] + self._terms[i + 1 :] return term @property def hasconstraint(self): - """bool, whether the term has any constraints - """ + """bool, whether the term has any constraints""" constrained = False for term in self._terms: constrained = constrained or term.hasconstraint @@ -1665,8 +1766,7 @@ def hasconstraint(self): @property def n_coefs(self): - """Total number of coefficients contributed by the terms in the model - """ + """Total number of coefficients contributed by the terms in the model""" return sum([term.n_coefs for term in self._terms]) def get_coef_indices(self, i=-1): @@ -1686,8 +1786,11 @@ def get_coef_indices(self, i=-1): return list(range(self.n_coefs)) if i >= len(self._terms): - raise ValueError('requested {}th term, but found only {} terms'\ - .format(i, len(self._terms))) + raise ValueError( + 'requested {}th term, but found only {} terms'.format( + i, len(self._terms) + ) + ) start = 0 for term in self._terms[:i]: @@ -1775,8 +1878,9 @@ def build_constraints(self, coefs, constraint_lam, constraint_l2): C.append(term.build_constraints(coefs[idxs], constraint_lam, constraint_l2)) return sp.sparse.block_diag(C) + # Minimal representations -def l(feature, lam=0.6, penalties='auto', verbose=False): +def l(feature, lam=0.6, penalties='auto', verbose=False): # noqa: E743 """ See Also @@ -1785,19 +1889,40 @@ def l(feature, lam=0.6, penalties='auto', verbose=False): """ return LinearTerm(feature=feature, lam=lam, penalties=penalties, verbose=verbose) -def s(feature, n_splines=20, spline_order=3, lam=0.6, - penalties='auto', constraints=None, dtype='numerical', - basis='ps', by=None, edge_knots=None, verbose=False): + +def s( + feature, + n_splines=20, + spline_order=3, + lam=0.6, + penalties='auto', + constraints=None, + dtype='numerical', + basis='ps', + by=None, + edge_knots=None, + verbose=False, +): """ See Also -------- SplineTerm : for developer details """ - return SplineTerm(feature=feature, n_splines=n_splines, spline_order=spline_order, - lam=lam, penalties=penalties, constraints=constraints, - dtype=dtype, basis=basis, by=by, edge_knots=edge_knots, - verbose=verbose) + return SplineTerm( + feature=feature, + n_splines=n_splines, + spline_order=spline_order, + lam=lam, + penalties=penalties, + constraints=constraints, + dtype=dtype, + basis=basis, + by=by, + 
edge_knots=edge_knots, + verbose=verbose, + ) + def f(feature, lam=0.6, penalties='auto', coding='one-hot', verbose=False): """ @@ -1806,8 +1931,10 @@ def f(feature, lam=0.6, penalties='auto', coding='one-hot', verbose=False): -------- FactorTerm : for developer details """ - return FactorTerm(feature=feature, lam=lam, penalties=penalties, - coding=coding, verbose=verbose) + return FactorTerm( + feature=feature, lam=lam, penalties=penalties, coding=coding, verbose=verbose + ) + def te(*args, **kwargs): """ @@ -1818,18 +1945,22 @@ def te(*args, **kwargs): """ return TensorTerm(*args, **kwargs) + intercept = Intercept() # copy docs -for minimal_, class_ in zip([l, s, f, te], [LinearTerm, SplineTerm, FactorTerm, TensorTerm]): +for minimal_, class_ in zip( + [l, s, f, te], [LinearTerm, SplineTerm, FactorTerm, TensorTerm] +): minimal_.__doc__ = class_.__init__.__doc__ + minimal_.__doc__ -TERMS = {'term' : Term, - 'intercept_term' : Intercept, - 'linear_term': LinearTerm, - 'spline_term': SplineTerm, - 'factor_term': FactorTerm, - 'tensor_term': TensorTerm, - 'term_list': TermList +TERMS = { + 'term': Term, + 'intercept_term': Intercept, + 'linear_term': LinearTerm, + 'spline_term': SplineTerm, + 'factor_term': FactorTerm, + 'tensor_term': TensorTerm, + 'term_list': TermList, } diff --git a/pygam/tests/conftest.py b/pygam/tests/conftest.py index 349cd447..3a39c108 100644 --- a/pygam/tests/conftest.py +++ b/pygam/tests/conftest.py @@ -1,13 +1,28 @@ # -*- coding: utf-8 -*- import pytest -import pandas as pd -import numpy as np -from pygam import * -from pygam.datasets import (mcycle, coal, faithful, cake, coal, default, trees, - hepatitis, wage, toy_classification, - head_circumference, chicago, toy_interaction) +from pygam import ( + LinearGAM, + PoissonGAM, + s, + f, + te, +) +from pygam.datasets import ( + mcycle, + coal, + faithful, + cake, + default, + trees, + hepatitis, + wage, + toy_classification, + head_circumference, + chicago, + toy_interaction, +) @pytest.fixture @@ -16,84 +31,98 @@ def mcycle_X_y(): # recommend LinearGAM return mcycle(return_X_y=True) + @pytest.fixture def mcycle_gam(mcycle_X_y): X, y = mcycle_X_y - gam = LinearGAM().fit(X,y) + gam = LinearGAM().fit(X, y) return gam + @pytest.fixture def coal_X_y(): # y is counts # recommend PoissonGAM return coal(return_X_y=True) + @pytest.fixture def faithful_X_y(): # y is counts # recommend PoissonGAM return faithful(return_X_y=True) + @pytest.fixture def wage_X_y(): # y is real # recommend LinearGAM return wage(return_X_y=True) + @pytest.fixture def wage_gam(wage_X_y): X, y = wage_X_y gam = LinearGAM(s(0) + s(1) + f(2)).fit(X, y) return gam + @pytest.fixture def trees_X_y(): # y is real. 
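# Editor's aside (sketch, not part of the patch): the minimal representations
# defined above compose into a TermList with `+`, and a term list rebuilds
# from its `info` dict through the TERMS registry, as the tests later in this
# diff also exercise.
from pygam.terms import Term, l, s

terms = s(0, n_splines=10) + l(1)
assert Term.build_from_info(terms.info) == terms  # round-trips via TERMS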
# recommend InvGaussGAM, or GAM(distribution='gamma', link='log') return trees(return_X_y=True) + @pytest.fixture def default_X_y(): # y is binary # recommend LogisticGAM return default(return_X_y=True) + @pytest.fixture def cake_X_y(): # y is real # recommend LinearGAM return cake(return_X_y=True) + @pytest.fixture def hepatitis_X_y(): # y is real # recommend LinearGAM return hepatitis(return_X_y=True) + @pytest.fixture def toy_classification_X_y(): # y is binary ints # recommend LogisticGAM return toy_classification(return_X_y=True) + @pytest.fixture def head_circumference_X_y(): # centile data # recommend ExpectileGAM return head_circumference(return_X_y=True) + @pytest.fixture def chicago_X_y(): # y is counts # recommend PoissonGAM return chicago(return_X_y=True) + @pytest.fixture def chicago_gam(chicago_X_y): X, y = chicago_X_y gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y) return gam + @pytest.fixture def toy_interaction_X_y(): # y is real diff --git a/pygam/tests/test_GAM_methods.py b/pygam/tests/test_GAM_methods.py index f74999d5..9b808c41 100644 --- a/pygam/tests/test_GAM_methods.py +++ b/pygam/tests/test_GAM_methods.py @@ -6,7 +6,16 @@ import pytest import scipy as sp -from pygam import * +from pygam import ( + GAM, + LinearGAM, + LogisticGAM, + PoissonGAM, + ExpectileGAM, + l, + s, + f, +) def test_LinearGAM_prediction(mcycle_X_y, mcycle_gam): @@ -15,7 +24,8 @@ def test_LinearGAM_prediction(mcycle_X_y, mcycle_gam): """ X, y = mcycle_X_y preds = mcycle_gam.predict(X) - assert(preds.shape == y.shape) + assert preds.shape == y.shape + def test_LogisticGAM_accuracy(default_X_y): """ @@ -27,7 +37,8 @@ def test_LogisticGAM_accuracy(default_X_y): preds = gam.predict(X) acc0 = (preds == y).mean() acc1 = gam.accuracy(X, y) - assert(acc0 == acc1) + assert acc0 == acc1 + def test_PoissonGAM_exposure(coal_X_y): """ @@ -35,7 +46,8 @@ def test_PoissonGAM_exposure(coal_X_y): """ X, y = coal_X_y gam = PoissonGAM().fit(X, y, exposure=np.ones_like(y)) - assert((gam.predict(X, exposure=np.ones_like(y)*2) == 2 *gam.predict(X)).all()) + assert (gam.predict(X, exposure=np.ones_like(y) * 2) == 2 * gam.predict(X)).all() + def test_PoissonGAM_loglike(coal_X_y): """ @@ -49,7 +61,10 @@ def test_PoissonGAM_loglike(coal_X_y): gam_high_var = PoissonGAM().fit(X, y * 2, exposure=exposure * 2) gam_low_var = PoissonGAM().fit(X, y, exposure=exposure) - assert gam_high_var.loglikelihood(X, y * 2, exposure * 2) < gam_low_var.loglikelihood(X, y, exposure) + assert gam_high_var.loglikelihood( + X, y * 2, exposure * 2 + ) < gam_low_var.loglikelihood(X, y, exposure) + def test_large_GAM(coal_X_y): """ @@ -58,7 +73,8 @@ def test_large_GAM(coal_X_y): X = np.linspace(0, 100, 100000) y = X**2 gam = LinearGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_summary(mcycle_X_y, mcycle_gam): """ @@ -68,12 +84,13 @@ def test_summary(mcycle_X_y, mcycle_gam): gam = LinearGAM() try: - gam.summary() + gam.summary() except AttributeError: - assert(True) + assert True mcycle_gam.summary() - assert(True) + assert True + def test_more_splines_than_samples(mcycle_X_y): """ @@ -82,8 +99,8 @@ def test_more_splines_than_samples(mcycle_X_y): X, y = mcycle_X_y n = len(X) - gam = LinearGAM(s(0, n_splines=n+1)).fit(X, y) - assert(gam._is_fitted) + gam = LinearGAM(s(0, n_splines=n + 1)).fit(X, y) + assert gam._is_fitted # TODO here is our bug: # we cannot display the term-by-term effective DoF because we have fewer @@ -91,6 +108,7 @@ def test_more_splines_than_samples(mcycle_X_y): assert 
len(gam.statistics_['edof_per_coef']) < len(gam.coef_) gam.summary() + def test_deviance_residuals(mcycle_X_y, mcycle_gam): """ for linear GAMs, the deviance residuals should be equal to the y - y_pred @@ -98,7 +116,8 @@ def test_deviance_residuals(mcycle_X_y, mcycle_gam): X, y = mcycle_X_y res = mcycle_gam.deviance_residuals(X, y) err = y - mcycle_gam.predict(X) - assert((res == err).all()) + assert (res == err).all() + def test_conf_intervals_return_array(mcycle_X_y, mcycle_gam): """ @@ -106,7 +125,8 @@ def test_conf_intervals_return_array(mcycle_X_y, mcycle_gam): """ X, y = mcycle_X_y conf_ints = mcycle_gam.confidence_intervals(X) - assert(conf_ints.ndim == 2) + assert conf_ints.ndim == 2 + def test_conf_intervals_quantiles_width_interchangable(mcycle_X_y, mcycle_gam): """ @@ -114,9 +134,10 @@ def test_conf_intervals_quantiles_width_interchangable(mcycle_X_y, mcycle_gam): should return the same result """ X, y = mcycle_X_y - conf_ints_a = mcycle_gam.confidence_intervals(X, width=.9) - conf_ints_b = mcycle_gam.confidence_intervals(X, quantiles=[.05, .95]) - assert(np.allclose(conf_ints_a, conf_ints_b)) + conf_ints_a = mcycle_gam.confidence_intervals(X, width=0.9) + conf_ints_b = mcycle_gam.confidence_intervals(X, quantiles=[0.05, 0.95]) + assert np.allclose(conf_ints_a, conf_ints_b) + def test_conf_intervals_ordered(mcycle_X_y, mcycle_gam): """ @@ -124,7 +145,8 @@ def test_conf_intervals_ordered(mcycle_X_y, mcycle_gam): """ X, y = mcycle_X_y conf_ints = mcycle_gam.confidence_intervals(X) - assert((conf_ints[:,0] <= conf_ints[:,1]).all()) + assert (conf_ints[:, 0] <= conf_ints[:, 1]).all() + def test_summary_returns_12_lines(mcycle_gam): """ @@ -153,15 +175,17 @@ def test_summary_returns_12_lines(mcycle_gam): WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with known smoothing parameters, but when smoothing parameters have been estimated, the p-values are typically lower than they should be, meaning that the tests reject the null too readily. 
- """ + """ # noqa: E501 if sys.version_info.major == 2: from StringIO import StringIO if sys.version_info.major == 3: - from io import StringIO - stdout = sys.stdout #keep a handle on the real standard output - sys.stdout = StringIO() #Choose a file-like object to write to + from io import StringIO # noqa: F811 + stdout = sys.stdout # keep a handle on the real standard output + sys.stdout = StringIO() # Choose a file-like object to write to mcycle_gam.summary() - assert(len(sys.stdout.getvalue().split('\n')) == 24) + assert len(sys.stdout.getvalue().split('\n')) == 24 + sys.stdout = stdout + def test_is_fitted_predict(mcycle_X_y): """ @@ -172,6 +196,7 @@ def test_is_fitted_predict(mcycle_X_y): with pytest.raises(AttributeError): gam.predict(X) + def test_is_fitted_predict_mu(mcycle_X_y): """ test predict_mu requires fitted model @@ -181,6 +206,7 @@ def test_is_fitted_predict_mu(mcycle_X_y): with pytest.raises(AttributeError): gam.predict_mu(X) + def test_is_fitted_dev_resid(mcycle_X_y): """ test deviance_residuals requires fitted model @@ -190,6 +216,7 @@ def test_is_fitted_dev_resid(mcycle_X_y): with pytest.raises(AttributeError): gam.deviance_residuals(X, y) + def test_is_fitted_conf_intervals(mcycle_X_y): """ test confidence_intervals requires fitted model @@ -199,6 +226,7 @@ def test_is_fitted_conf_intervals(mcycle_X_y): with pytest.raises(AttributeError): gam.confidence_intervals(X) + def test_is_fitted_pdep(mcycle_X_y): """ test partial_dependence requires fitted model @@ -207,6 +235,7 @@ def test_is_fitted_pdep(mcycle_X_y): with pytest.raises(AttributeError): gam.partial_dependence(term=0) + def test_is_fitted_summary(mcycle_X_y): """ test summary requires fitted model @@ -216,13 +245,15 @@ def test_is_fitted_summary(mcycle_X_y): with pytest.raises(AttributeError): gam.summary() + def test_set_params_with_external_param(): """ test set_params sets a real parameter """ gam = GAM(lam=1) gam.set_params(lam=420) - assert(gam.lam == 420) + assert gam.lam == 420 + def test_set_params_with_phony_param(): """ @@ -230,17 +261,19 @@ def test_set_params_with_phony_param(): """ gam = GAM() gam.set_params(cat=420) - assert(not hasattr(gam, 'cat')) + assert not hasattr(gam, 'cat') + def test_set_params_with_phony_param_force(): """ test set_params can set phony params if we use the force=True """ gam = GAM() - assert(not hasattr(gam, 'cat')) + assert not hasattr(gam, 'cat') gam.set_params(cat=420, force=True) - assert(gam.cat == 420) + assert gam.cat == 420 + def test_get_params(): """ @@ -248,11 +281,10 @@ def test_get_params(): """ gam = GAM(lam=420) params = gam.get_params() - assert(params['lam'] == 420) + assert params['lam'] == 420 class TestSamplingFromPosterior(object): - def test_drawing_samples_from_unfitted_model(self, mcycle_X_y, mcycle_gam): X, y = mcycle_X_y gam = LinearGAM() @@ -298,12 +330,15 @@ def test_shape_of_random_samples(self, mcycle_X_y, mcycle_gam): idxs = np.random.choice(np.arange(len(X)), n_samples_in_grid) XX = X[idxs] - sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws, - sample_at_X=XX) - sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws, - sample_at_X=XX) - sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws, - sample_at_X=XX) + sample_coef = mcycle_gam.sample( + X, y, quantity='coef', n_draws=n_draws, sample_at_X=XX + ) + sample_mu = mcycle_gam.sample( + X, y, quantity='mu', n_draws=n_draws, sample_at_X=XX + ) + sample_y = mcycle_gam.sample( + X, y, quantity='y', n_draws=n_draws, sample_at_X=XX + ) assert 
sample_coef.shape == (n_draws, len(mcycle_gam.coef_)) assert sample_mu.shape == (n_draws, n_samples_in_grid) @@ -313,9 +348,12 @@ def test_shape_bootstrap_samples_of_smoothing(self, mcycle_X_y, mcycle_gam): X, y = mcycle_X_y for n_bootstraps in [1, 2]: - coef_bootstraps, cov_bootstraps = ( - mcycle_gam._bootstrap_samples_of_smoothing( - X, y, n_bootstraps=n_bootstraps)) + ( + coef_bootstraps, + cov_bootstraps, + ) = mcycle_gam._bootstrap_samples_of_smoothing( + X, y, n_bootstraps=n_bootstraps + ) assert len(coef_bootstraps) == len(cov_bootstraps) == n_bootstraps for coef, cov in zip(coef_bootstraps, cov_bootstraps): assert coef.shape == mcycle_gam.coef_.shape @@ -323,7 +361,8 @@ def test_shape_bootstrap_samples_of_smoothing(self, mcycle_X_y, mcycle_gam): for n_draws in [1, 2]: coef_draws = mcycle_gam._simulate_coef_from_bootstraps( - n_draws, coef_bootstraps, cov_bootstraps) + n_draws, coef_bootstraps, cov_bootstraps + ) assert coef_draws.shape == (n_draws, len(mcycle_gam.coef_)) def test_bad_sample_params(self, mcycle_X_y, mcycle_gam): @@ -340,15 +379,15 @@ def test_prediction_interval_unknown_scale(): we test at a large sample limit, where the t distribution becomes normal """ n = 1000000 - X = np.linspace(0,1,n) + X = np.linspace(0, 1, n) y = np.random.randn(n) gam_a = LinearGAM(terms=l(0)).fit(X, y) gam_b = LinearGAM(s(0, n_splines=4)).fit(X, y) XX = gam_a.generate_X_grid(term=0) - intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0) - intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0) + intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, 0.9]).mean(axis=0) + intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, 0.9]).mean(axis=0) assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01) assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01) @@ -356,21 +395,22 @@ def test_prediction_interval_unknown_scale(): assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01) assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01) + def test_prediction_interval_known_scale(): """ the prediction intervals should be correct to a few decimal places we test at a large sample limit. 
""" n = 1000000 - X = np.linspace(0,1,n) + X = np.linspace(0, 1, n) y = np.random.randn(n) - gam_a = LinearGAM(terms=l(0), scale=1.).fit(X, y) - gam_b = LinearGAM(s(0, n_splines=4), scale=1.).fit(X, y) + gam_a = LinearGAM(terms=l(0), scale=1.0).fit(X, y) + gam_b = LinearGAM(s(0, n_splines=4), scale=1.0).fit(X, y) XX = gam_a.generate_X_grid(term=0) - intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0) - intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0) + intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, 0.9]).mean(axis=0) + intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, 0.9]).mean(axis=0) assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01) assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01) @@ -378,6 +418,7 @@ def test_prediction_interval_known_scale(): assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01) assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01) + def test_pvalue_rejects_useless_feature(wage_X_y): """ check that a p-value can reject a useless feature @@ -391,7 +432,8 @@ def test_pvalue_rejects_useless_feature(wage_X_y): # now do the test, with some safety p_values = gam._estimate_p_values() print(p_values) - assert(p_values[-2] > .5) # because -1 is intercept + assert p_values[-2] > 0.5 # because -1 is intercept + def test_fit_quantile_is_close_enough(head_circumference_X_y): """see that we get close to the desired quantile @@ -415,8 +457,7 @@ def test_fit_quantile_is_close_enough(head_circumference_X_y): def test_fit_quantile_NOT_close_enough(head_circumference_X_y): - """see that we DO NOT get close to the desired quantile - """ + """see that we DO NOT get close to the desired quantile""" X, y = head_circumference_X_y quantile = 0.99 @@ -427,9 +468,9 @@ def test_fit_quantile_NOT_close_enough(head_circumference_X_y): assert np.abs(ratio - quantile) > tol + def test_fit_quantile_raises_ValueError(head_circumference_X_y): - """see that we DO NOT get fit on bad argument requests - """ + """see that we DO NOT get fit on bad argument requests""" X, y = head_circumference_X_y with pytest.raises(ValueError): @@ -456,12 +497,14 @@ def test_fit_quantile_raises_ValueError(head_circumference_X_y): with pytest.raises(ValueError): ExpectileGAM().fit_quantile(X, y, max_iter=-1, quantile=0.5) + class TestRegressions(object): def test_pvalue_invariant_to_scale(self, wage_X_y): """ regression test. - a bug made the F-statistic sensitive to scale changes, when it should be invariant. + a bug made the F-statistic sensitive to scale changes, + when it should be invariant. check that a p-value should not change when we change the scale of the response """ @@ -505,7 +548,7 @@ def test_non_int_exposure_produced_no_inf_in_PoissonGAM_ll(self, coal_X_y): """ X, y = coal_X_y - rate = 1.2 + np.cos(np.linspace(0, 2. 
* np.pi, len(y)))
+        rate = 1.2 + np.cos(np.linspace(0, 2.0 * np.pi, len(y)))
 
         gam = PoissonGAM().fit(X, y, exposure=rate)
diff --git a/pygam/tests/test_GAM_params.py b/pygam/tests/test_GAM_params.py
index 5cd76e85..affeb955 100644
--- a/pygam/tests/test_GAM_params.py
+++ b/pygam/tests/test_GAM_params.py
@@ -3,7 +3,13 @@
 import numpy as np
 import pytest
 
-from pygam import *
+from pygam import (
+    LinearGAM,
+    LogisticGAM,
+    s,
+    l,
+    intercept,
+)
 
 
 def test_lam_non_neg_array_like(cake_X_y):
@@ -12,15 +18,12 @@ def test_lam_non_neg_array_like(cake_X_y):
     """
     X, y = cake_X_y
 
-    try:
-        gam = LinearGAM(lam=-1).fit(X, y)
-    except ValueError:
-        assert(True)
+    with pytest.raises(ValueError):
+        LinearGAM(lam=-1).fit(X, y)  # noqa: F841
+
+    with pytest.raises(ValueError):
+        LinearGAM(lam=['hi']).fit(X, y)
 
-    try:
-        gam = LinearGAM(lam=['hi']).fit(X, y)
-    except ValueError:
-        assert(True)
 
 def test_penalties_must_be_or_contain_callable_or_auto(mcycle_X_y):
     """
@@ -29,11 +32,12 @@ def test_penalties_must_be_or_contain_callable_or_auto(mcycle_X_y):
     X, y = mcycle_X_y
 
     with pytest.raises(ValueError):
-        gam = LinearGAM(terms=s(0, penalties='continuous'))
+        LinearGAM(terms=s(0, penalties='continuous'))
 
     # now do iterable
     with pytest.raises(ValueError):
-        gam = LinearGAM(s(0, penalties=['continuous']))
+        LinearGAM(s(0, penalties=['continuous']))
+
 
 def test_intercept(mcycle_X_y):
     """
@@ -43,6 +47,7 @@ def test_intercept(mcycle_X_y):
     gam = LinearGAM(terms=intercept)
     gam.fit(X, y)
 
+
 def test_require_one_term(mcycle_X_y):
     """
     need at least one term
@@ -52,21 +57,25 @@ def test_require_one_term(mcycle_X_y):
     with pytest.raises(ValueError):
         gam.fit(X, y)
 
+
 def test_linear_regression(mcycle_X_y):
     """
     should be able to do linear regression
     """
     X, y = mcycle_X_y
     gam = LinearGAM(l(0)).fit(X, y)
-    assert(gam._is_fitted)
+    assert gam._is_fitted
+
 
 def test_compute_stats_even_if_not_enough_iters(default_X_y):
    """
-    GAM should collect model statistics after optimization ends even if it didnt converge
+    GAM should collect model statistics after optimization ends even if it didn't
+    converge
     """
     X, y = default_X_y
     gam = LogisticGAM(max_iter=1).fit(X, y)
-    assert(hasattr(gam, 'statistics_'))
+    assert hasattr(gam, 'statistics_')
+
 
 def test_easy_plural_arguments(wage_X_y):
     """
@@ -78,6 +87,7 @@ def test_easy_plural_arguments(wage_X_y):
     assert gam._is_fitted
     assert gam.n_splines == [10] * X.shape[1]
 
+
 class TestRegressions(object):
     def test_no_explicit_terms_custom_lambda(self, wage_X_y):
         X, y = wage_X_y
@@ -94,10 +104,10 @@ class TestRegressions(object):
 
     def test_n_splines_not_int(self, mcycle_X_y):
         """
-        used to fail for n_splines of type np.int64, as returned by np.arange
+        used to fail for n_splines of type int64, as returned by np.arange
         """
         X, y = mcycle_X_y
-        gam = LinearGAM(n_splines=np.arange(9,10)[0]).fit(X, y)
+        gam = LinearGAM(n_splines=np.arange(9, 10)[0]).fit(X, y)
 
         assert gam._is_fitted
diff --git a/pygam/tests/test_GAMs.py b/pygam/tests/test_GAMs.py
index 47632f4f..d88f962c 100644
--- a/pygam/tests/test_GAMs.py
+++ b/pygam/tests/test_GAMs.py
@@ -2,7 +2,15 @@
 
 import pytest
 
-from pygam import *
+from pygam import (
+    GAM,
+    LinearGAM,
+    LogisticGAM,
+    PoissonGAM,
+    GammaGAM,
+    InvGaussGAM,
+    ExpectileGAM,
+)
 
 
 def test_can_build_sub_models():
@@ -15,7 +23,8 @@ def test_can_build_sub_models():
     GammaGAM()
     InvGaussGAM()
     ExpectileGAM()
-    assert(True)
+    assert True
+
 
 def test_LinearGAM_uni(mcycle_X_y):
     """
     LinearGAM should work on univariate data
     """
     X, y = mcycle_X_y
     gam = LinearGAM().fit(X,
y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_LinearGAM_multi(wage_X_y): """ @@ -31,7 +41,8 @@ def test_LinearGAM_multi(wage_X_y): """ X, y = wage_X_y gam = LinearGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_LogisticGAM(default_X_y): """ @@ -39,7 +50,8 @@ def test_LogisticGAM(default_X_y): """ X, y = default_X_y gam = LogisticGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_PoissonGAM(coal_X_y): """ @@ -47,7 +59,8 @@ def test_PoissonGAM(coal_X_y): """ X, y = coal_X_y gam = PoissonGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_InvGaussGAM(trees_X_y): """ @@ -55,7 +68,8 @@ def test_InvGaussGAM(trees_X_y): """ X, y = trees_X_y gam = InvGaussGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_GammaGAM(trees_X_y): """ @@ -63,7 +77,8 @@ def test_GammaGAM(trees_X_y): """ X, y = trees_X_y gam = GammaGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_CustomGAM(trees_X_y): """ @@ -71,7 +86,8 @@ def test_CustomGAM(trees_X_y): """ X, y = trees_X_y gam = GAM(distribution='gamma', link='inverse').fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_ExpectileGAM_uni(mcycle_X_y): """ @@ -79,7 +95,8 @@ def test_ExpectileGAM_uni(mcycle_X_y): """ X, y = mcycle_X_y gam = ExpectileGAM().fit(X, y) - assert(gam._is_fitted) + assert gam._is_fitted + def test_ExpectileGAM_bad_expectiles(mcycle_X_y): """ @@ -94,4 +111,6 @@ def test_ExpectileGAM_bad_expectiles(mcycle_X_y): ExpectileGAM(expectile=-0.1).fit(X, y) with pytest.raises(ValueError): ExpectileGAM(expectile=1.1).fit(X, y) + + # TODO check dicts: DISTRIBUTIONS etc diff --git a/pygam/tests/test_core.py b/pygam/tests/test_core.py index e2205843..f69bb4b7 100644 --- a/pygam/tests/test_core.py +++ b/pygam/tests/test_core.py @@ -1,21 +1,20 @@ # -*- coding: utf-8 -*- -import numpy as np -import pytest +from pygam.core import Core, nice_repr -from pygam.core import * def test_Core_class(): """ test attributes of core class """ c = Core() - assert(c._name == None) + assert c._name is None c = Core(name='cat', line_width=70, line_offset=3) - assert(c._name == 'cat') - assert(c._line_width == 70) - assert(c._line_offset == 3) + assert c._name == 'cat' + assert c._line_width == 70 + assert c._line_offset == 3 + def test_nice_repr(): """ @@ -23,12 +22,13 @@ def test_nice_repr(): """ param_kvs = {} out = nice_repr('hi', param_kvs, line_width=30, line_offset=5, decimals=3) - assert(out == "hi()") + assert out == "hi()" + def test_nice_repr_more_attrs(): """ test a simple repr for a fake object with more attrs """ - param_kvs = {'color': 'blue', 'n_ears': 3, 'height':1.3336} + param_kvs = {'color': 'blue', 'n_ears': 3, 'height': 1.3336} out = nice_repr('hi', param_kvs, line_width=60, line_offset=5, decimals=3) - assert(out == "hi(color='blue', height=1.334, n_ears=3)") + assert out == "hi(color='blue', height=1.334, n_ears=3)" diff --git a/pygam/tests/test_datasets.py b/pygam/tests/test_datasets.py index f1daf4be..f17862e4 100644 --- a/pygam/tests/test_datasets.py +++ b/pygam/tests/test_datasets.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- -import numpy as np -import pytest - from pygam.datasets import cake from pygam.datasets import coal from pygam.datasets import default @@ -10,11 +7,9 @@ from pygam.datasets import hepatitis from pygam.datasets import mcycle from pygam.datasets import trees -from pygam.datasets import wage from pygam.datasets import chicago from pygam.datasets import toy_interaction -from 
pygam.datasets import __all__ as DATASETS def _test_dataset(dataset_loader, n_rows, n_columns_X, n_columns_df, n_rows_X=None): """check the length of the dataset is the same regardless of the transformation @@ -66,29 +61,38 @@ def _test_dataset(dataset_loader, n_rows, n_columns_X, n_columns_df, n_rows_X=No # check shape assert X_y[0].ndim == 2 + def test_cake(): _test_dataset(cake, n_rows=270, n_columns_X=3, n_columns_df=5) + def test_coal(): _test_dataset(coal, n_rows=191, n_columns_X=1, n_columns_df=1, n_rows_X=150) + def test_default(): _test_dataset(default, n_rows=10000, n_columns_X=3, n_columns_df=4) + def test_faithful(): _test_dataset(faithful, n_rows=272, n_columns_X=1, n_columns_df=2, n_rows_X=200) + def test_hepatitis(): _test_dataset(hepatitis, n_rows=86, n_columns_X=1, n_columns_df=3, n_rows_X=83) + def test_mcycle(): _test_dataset(mcycle, n_rows=133, n_columns_X=1, n_columns_df=2) + def test_trees(): _test_dataset(trees, n_rows=31, n_columns_X=2, n_columns_df=3) + def test_chicago(): _test_dataset(chicago, n_rows=5114, n_columns_X=4, n_columns_df=7, n_rows_X=4863) + def test_toy_interaction(): _test_dataset(toy_interaction, n_rows=50000, n_columns_X=2, n_columns_df=3) diff --git a/pygam/tests/test_gridsearch.py b/pygam/tests/test_gridsearch.py index c638f3ad..52a0f68d 100644 --- a/pygam/tests/test_gridsearch.py +++ b/pygam/tests/test_gridsearch.py @@ -2,7 +2,13 @@ import numpy as np import pytest -from pygam import * +from pygam import ( + LinearGAM, + LogisticGAM, + PoissonGAM, + GammaGAM, + InvGaussGAM, +) def test_gridsearch_returns_scores(mcycle_X_y): @@ -13,9 +19,10 @@ def test_gridsearch_returns_scores(mcycle_X_y): X, y = mcycle_X_y gam = LinearGAM() - scores = gam.gridsearch(X, y, lam=np.logspace(-3,3, n), return_scores=True) + scores = gam.gridsearch(X, y, lam=np.logspace(-3, 3, n), return_scores=True) + + assert len(scores) == n - assert(len(scores) == n) def test_gridsearch_returns_extra_score_if_fitted(mcycle_X_y): """ @@ -25,9 +32,10 @@ def test_gridsearch_returns_extra_score_if_fitted(mcycle_X_y): X, y = mcycle_X_y gam = LinearGAM().fit(X, y) - scores = gam.gridsearch(X, y, lam=np.logspace(-3,3, n), return_scores=True) + scores = gam.gridsearch(X, y, lam=np.logspace(-3, 3, n), return_scores=True) + + assert len(scores) == n + 1 - assert(len(scores) == n + 1) def test_gridsearch_keep_best(mcycle_X_y): """ @@ -39,10 +47,12 @@ def test_gridsearch_keep_best(mcycle_X_y): gam = LinearGAM(lam=1000000).fit(X, y) score1 = gam.statistics_['GCV'] - scores = gam.gridsearch(X, y, lam=np.logspace(-3,3, n), - keep_best=False, return_scores=True) + scores = gam.gridsearch( + X, y, lam=np.logspace(-3, 3, n), keep_best=False, return_scores=True + ) + + assert np.min(list(scores.values())) < score1 - assert(np.min(list(scores.values())) < score1) def test_gridsearch_improves_objective(mcycle_X_y): """ @@ -54,10 +64,11 @@ def test_gridsearch_improves_objective(mcycle_X_y): gam = LinearGAM().fit(X, y) objective_0 = gam.statistics_['GCV'] - gam = LinearGAM().gridsearch(X, y, lam=np.logspace(-2,0, n)) + gam = LinearGAM().gridsearch(X, y, lam=np.logspace(-2, 0, n)) objective_1 = gam.statistics_['GCV'] - assert(objective_1 <= objective_0) + assert objective_1 <= objective_0 + def test_gridsearch_all_dimensions_same(cake_X_y): """ @@ -66,12 +77,11 @@ def test_gridsearch_all_dimensions_same(cake_X_y): n = 5 X, y = cake_X_y - scores = LinearGAM().gridsearch(X, y, - lam=np.logspace(-3,3, n), - return_scores=True) + scores = LinearGAM().gridsearch(X, y, lam=np.logspace(-3, 3, n), 
return_scores=True) + + assert len(scores) == n + assert X.shape[1] > 1 - assert(len(scores) == n) - assert(X.shape[1] > 1) def test_gridsearch_all_dimensions_independent(cake_X_y): """ @@ -81,12 +91,13 @@ def test_gridsearch_all_dimensions_independent(cake_X_y): X, y = cake_X_y m = X.shape[1] - scores = LinearGAM().gridsearch(X, y, - lam=[np.logspace(-3,3, n)]*m, - return_scores=True) + scores = LinearGAM().gridsearch( + X, y, lam=[np.logspace(-3, 3, n)] * m, return_scores=True + ) + + assert len(scores) == n**m + assert m > 1 - assert(len(scores) == n**m) - assert(m > 1) def test_no_cartesian_product(cake_X_y): """ @@ -97,15 +108,14 @@ def test_no_cartesian_product(cake_X_y): X, y = cake_X_y m = X.shape[1] - lams = np.array([np.logspace(-3,3, n)]*m).T + lams = np.array([np.logspace(-3, 3, n)] * m).T assert lams.shape == (n, m) - scores = LinearGAM().gridsearch(X, y, - lam=lams, - return_scores=True) + scores = LinearGAM().gridsearch(X, y, lam=lams, return_scores=True) + + assert len(scores) == n + assert m > 1 - assert(len(scores) == n) - assert(m > 1) def test_wrong_grid_shape(cake_X_y): """ @@ -115,18 +125,17 @@ def test_wrong_grid_shape(cake_X_y): lams = np.random.rand(50, X.shape[1] + 1) with pytest.raises(ValueError): - scores = LinearGAM().gridsearch(X, y, - lam=lams, - return_scores=True) + LinearGAM().gridsearch(X, y, lam=lams, return_scores=True) lams = lams.T.tolist() assert len(lams) == X.shape[1] + 1 with pytest.raises(ValueError): - scores = LinearGAM().gridsearch(X, y, - lam=lams, - return_scores=True) + LinearGAM().gridsearch(X, y, lam=lams, return_scores=True) + -def test_GCV_objective_is_for_unknown_scale(mcycle_X_y, default_X_y, coal_X_y, trees_X_y): +def test_GCV_objective_is_for_unknown_scale( + mcycle_X_y, default_X_y, coal_X_y, trees_X_y +): """ check that we use the GCV objective only for models with unknown scale @@ -136,32 +145,41 @@ def test_GCV_objective_is_for_unknown_scale(mcycle_X_y, default_X_y, coal_X_y, t """ lam = np.linspace(1e-3, 1e3, 2) - unknown_scale = [(LinearGAM, mcycle_X_y), - (GammaGAM, trees_X_y), - (InvGaussGAM, trees_X_y)] - - known_scale = [(LogisticGAM, default_X_y), - (PoissonGAM, coal_X_y)] + unknown_scale = [ + (LinearGAM, mcycle_X_y), + (GammaGAM, trees_X_y), + (InvGaussGAM, trees_X_y), + ] + known_scale = [(LogisticGAM, default_X_y), (PoissonGAM, coal_X_y)] for gam, (X, y) in unknown_scale: - scores1 = list(gam().gridsearch(X, y, lam=lam, objective='auto', - return_scores=True).values()) - scores2 = list(gam().gridsearch(X, y, lam=lam, objective='GCV', - return_scores=True).values()) - assert(np.allclose(scores1, scores2)) + scores1 = list( + gam() + .gridsearch(X, y, lam=lam, objective='auto', return_scores=True) + .values() + ) + scores2 = list( + gam() + .gridsearch(X, y, lam=lam, objective='GCV', return_scores=True) + .values() + ) + assert np.allclose(scores1, scores2) for gam, (X, y) in known_scale: try: - list(gam().gridsearch(X, y, lam=lam, objective='GCV', - return_scores=True).values()) + list( + gam() + .gridsearch(X, y, lam=lam, objective='GCV', return_scores=True) + .values() + ) except ValueError: - assert(True) + assert True - - -def test_UBRE_objective_is_for_known_scale(mcycle_X_y, default_X_y, coal_X_y, trees_X_y): +def test_UBRE_objective_is_for_known_scale( + mcycle_X_y, default_X_y, coal_X_y, trees_X_y +): """ check that we use the UBRE objective only for models with known scale @@ -171,35 +189,46 @@ def test_UBRE_objective_is_for_known_scale(mcycle_X_y, default_X_y, coal_X_y, tr """ lam = np.linspace(1e-3, 1e3, 2) 
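# Editor's aside (sketch, not part of the patch): the three lam-grid shapes
# exercised by the surrounding gridsearch tests. The data here is synthetic
# and the model counts follow the assertions in those tests.
import numpy as np
from pygam import LinearGAM

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
y = X @ np.array([1.0, -2.0]) + rng.normal(size=200)

n, m = 5, X.shape[1]
grid = np.logspace(-3, 3, n)
LinearGAM().gridsearch(X, y, lam=grid)  # one shared grid: n candidate models
LinearGAM().gridsearch(X, y, lam=[grid] * m)  # per-term grids: n**m models
LinearGAM().gridsearch(X, y, lam=np.tile(grid, (m, 1)).T)  # (n, m) rows, no cartesian product: n models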
- unknown_scale = [(LinearGAM, mcycle_X_y), - (GammaGAM, trees_X_y), - (InvGaussGAM, trees_X_y)] + unknown_scale = [ + (LinearGAM, mcycle_X_y), + (GammaGAM, trees_X_y), + (InvGaussGAM, trees_X_y), + ] - known_scale = [(LogisticGAM, default_X_y), - (PoissonGAM, coal_X_y)] + known_scale = [(LogisticGAM, default_X_y), (PoissonGAM, coal_X_y)] for gam, (X, y) in known_scale: - scores1 = list(gam().gridsearch(X, y, lam=lam, objective='auto', - return_scores=True).values()) - scores2 = list(gam().gridsearch(X, y, lam=lam, objective='UBRE', - return_scores=True).values()) - assert(np.allclose(scores1, scores2)) + scores1 = list( + gam() + .gridsearch(X, y, lam=lam, objective='auto', return_scores=True) + .values() + ) + scores2 = list( + gam() + .gridsearch(X, y, lam=lam, objective='UBRE', return_scores=True) + .values() + ) + assert np.allclose(scores1, scores2) for gam, (X, y) in unknown_scale: try: - list(gam().gridsearch(X, y, lam=lam, objective='UBRE', - return_scores=True).values()) + list( + gam() + .gridsearch(X, y, lam=lam, objective='UBRE', return_scores=True) + .values() + ) except ValueError: - assert(True) + assert True + def test_no_models_fitted(mcycle_X_y): """ test no models fitted returns orginal gam """ X, y = mcycle_X_y - scores = LinearGAM().gridsearch(X, y, lam=[-3, -2,-1], return_scores=True) + scores = LinearGAM().gridsearch(X, y, lam=[-3, -2, -1], return_scores=True) # scores is not a dict of scores but an (unfitted) gam! - assert(not isinstance(scores, dict)) - assert(isinstance(scores, LinearGAM)) - assert(not scores._is_fitted) + assert not isinstance(scores, dict) + assert isinstance(scores, LinearGAM) + assert not scores._is_fitted diff --git a/pygam/tests/test_partial_dependence.py b/pygam/tests/test_partial_dependence.py index cd5fe124..21e7cce3 100644 --- a/pygam/tests/test_partial_dependence.py +++ b/pygam/tests/test_partial_dependence.py @@ -1,12 +1,8 @@ # -*- coding: utf-8 -*- -import sys - -import numpy as np import pytest -import scipy as sp -from pygam import * +from pygam import LinearGAM class TestPartialDepencence(object): @@ -19,7 +15,7 @@ def test_partial_dependence_on_univar_data(self, mcycle_X_y): gam = LinearGAM(fit_intercept=False).fit(X, y) pred = gam.predict(X) pdep = gam.partial_dependence(term=0, X=X) - assert((pred == pdep.ravel()).all()) + assert (pred == pdep.ravel()).all() def test_partial_dependence_on_univar_data2(self, mcycle_X_y, mcycle_gam): """ @@ -29,7 +25,7 @@ def test_partial_dependence_on_univar_data2(self, mcycle_X_y, mcycle_gam): X, y = mcycle_X_y pred = mcycle_gam.predict(X) pdep = mcycle_gam.partial_dependence(term=0, X=X) - assert((pred != pdep.ravel()).all()) + assert (pred != pdep.ravel()).all() def test_partial_dependence_feature_doesnt_exist(self, mcycle_gam): """ @@ -39,7 +35,9 @@ def test_partial_dependence_feature_doesnt_exist(self, mcycle_gam): with pytest.raises(ValueError): mcycle_gam.partial_dependence(term=10) - def test_partial_dependence_gives_correct_shape_no_meshgrid(self, chicago_gam, chicago_X_y): + def test_partial_dependence_gives_correct_shape_no_meshgrid( + self, chicago_gam, chicago_X_y + ): """ when `meshgrid=False`, partial dependence method should return - n points if no X is supplied @@ -57,7 +55,7 @@ def test_partial_dependence_gives_correct_shape_no_meshgrid(self, chicago_gam, c # no confidence intervals, no X pdep = chicago_gam.partial_dependence(term=i) - assert pdep.shape == (100**len(term),) + assert pdep.shape == (100 ** len(term),) # with confidence intervals, specify X pdep, confi = 
chicago_gam.partial_dependence(term=i, X=X, width=0.95) @@ -66,10 +64,12 @@ def test_partial_dependence_gives_correct_shape_no_meshgrid(self, chicago_gam, c # with confidence intervals, no X pdep, confi = chicago_gam.partial_dependence(term=i, width=0.95) - assert pdep.shape == (100**len(term),) - assert confi.shape == (100**len(term), 2) + assert pdep.shape == (100 ** len(term),) + assert confi.shape == (100 ** len(term), 2) - def test_partial_dependence_gives_correct_shape_with_meshgrid(self, chicago_gam, chicago_X_y): + def test_partial_dependence_gives_correct_shape_with_meshgrid( + self, chicago_gam, chicago_X_y + ): """ when `meshgrid=True`, partial dependence method should return - pdep is meshes with the dimension of the term @@ -93,16 +93,22 @@ def test_partial_dependence_gives_correct_shape_with_meshgrid(self, chicago_gam, assert pdep.shape == (100,) * len(term) # with confidence intervals, specify X - pdep, confi = chicago_gam.partial_dependence(term=i, X=XX, meshgrid=True, width=0.95) + pdep, confi = chicago_gam.partial_dependence( + term=i, X=XX, meshgrid=True, width=0.95 + ) assert pdep.shape == (50,) * len(term) assert confi.shape == (50,) * len(term) + (2,) # with confidence intervals, no X - pdep, confi = chicago_gam.partial_dependence(term=i, meshgrid=True, width=0.95) + pdep, confi = chicago_gam.partial_dependence( + term=i, meshgrid=True, width=0.95 + ) assert pdep.shape == (100,) * len(term) - assert confi.shape == (100,) * len(term) +(2,) + assert confi.shape == (100,) * len(term) + (2,) - def test_partital_dependence_width_and_quantiles_equivalent(self, chicago_gam, chicago_X_y): + def test_partital_dependence_width_and_quantiles_equivalent( + self, chicago_gam, chicago_X_y + ): """ for non-tensor terms, the outputs of `partial_dependence` is the same regardless of `meshgrid=True/False` @@ -113,7 +119,9 @@ def test_partital_dependence_width_and_quantiles_equivalent(self, chicago_gam, c assert (meshTrue == meshFalse).all() - def test_partial_dependence_meshgrid_true_false_equivalent_for_non_tensors(self, chicago_gam, chicago_X_y): + def test_partial_dependence_meshgrid_true_false_equivalent_for_non_tensors( + self, chicago_gam, chicago_X_y + ): """ for tensor terms the value of `meshgrid` matters """ @@ -133,14 +141,17 @@ def test_intercept_raises_error_for_partial_dependence(self, mcycle_X_y): gam_intercept = LinearGAM(fit_intercept=True).fit(X, y) with pytest.raises(ValueError): - pdeps = gam_intercept.partial_dependence(term=-1) + gam_intercept.partial_dependence(term=-1) gam_no_intercept = LinearGAM(fit_intercept=False).fit(X, y) - pdeps = gam_no_intercept.partial_dependence(term=-1) + gam_no_intercept.partial_dependence(term=-1) def test_no_X_needed_for_partial_dependence(self, mcycle_gam): """ partial_dependence() method uses generate_X_grid by default for the X array """ XX = mcycle_gam.generate_X_grid(term=0) - assert (mcycle_gam.partial_dependence(term=0) == mcycle_gam.partial_dependence(term=0, X=XX)).all() + assert ( + mcycle_gam.partial_dependence(term=0) + == mcycle_gam.partial_dependence(term=0, X=XX) + ).all() diff --git a/pygam/tests/test_penalties.py b/pygam/tests/test_penalties.py index 68037619..bf6dd68a 100644 --- a/pygam/tests/test_penalties.py +++ b/pygam/tests/test_penalties.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- import numpy as np -import pytest -from pygam import * +from pygam import LinearGAM, s from pygam.penalties import derivative from pygam.penalties import l2 @@ -23,33 +22,35 @@ def test_single_spline_penalty(): l2 should penalty be 
1. monotonic_ and convexity_ should be 0. """ - coef = np.array(1.) - assert(np.alltrue(derivative(1, coef).A == 0.)) - assert(np.alltrue(l2(1, coef).A == 1.)) - assert(np.alltrue(monotonic_inc(1, coef).A == 0.)) - assert(np.alltrue(monotonic_dec(1, coef).A == 0.)) - assert(np.alltrue(convex(1, coef).A == 0.)) - assert(np.alltrue(concave(1, coef).A == 0.)) - assert(np.alltrue(none(1, coef).A == 0.)) + coef = np.array(1.0) + assert np.alltrue(derivative(1, coef).A == 0.0) + assert np.alltrue(l2(1, coef).A == 1.0) + assert np.alltrue(monotonic_inc(1, coef).A == 0.0) + assert np.alltrue(monotonic_dec(1, coef).A == 0.0) + assert np.alltrue(convex(1, coef).A == 0.0) + assert np.alltrue(concave(1, coef).A == 0.0) + assert np.alltrue(none(1, coef).A == 0.0) + def test_wrap_penalty(): """ check that wrap penalty indeed reduces inserts the desired penalty into the linear term when fit_linear is True, and 0, when fit_linear is False. """ - coef = np.array(1.) + coef = np.array(1.0) n = 2 linear_penalty = -1 fit_linear = True p = wrap_penalty(none, fit_linear, linear_penalty=linear_penalty) P = p(n, coef).A - assert(P.sum() == linear_penalty) + assert P.sum() == linear_penalty fit_linear = False p = wrap_penalty(none, fit_linear, linear_penalty=linear_penalty) P = p(n, coef).A - assert(P.sum() == 0.) + assert P.sum() == 0.0 + def test_monotonic_inchepatitis_X_y(hepatitis_X_y): """ @@ -63,7 +64,8 @@ def test_monotonic_inchepatitis_X_y(hepatitis_X_y): XX = gam.generate_X_grid(term=0) Y = gam.predict(np.sort(XX)) diffs = np.diff(Y, n=1) - assert(((diffs >= 0) + np.isclose(diffs, 0.)).all()) + assert ((diffs >= 0) + np.isclose(diffs, 0.0)).all() + def test_monotonic_dec(hepatitis_X_y): """ @@ -77,7 +79,8 @@ def test_monotonic_dec(hepatitis_X_y): XX = gam.generate_X_grid(term=0) Y = gam.predict(np.sort(XX)) diffs = np.diff(Y, n=1) - assert(((diffs <= 0) + np.isclose(diffs, 0.)).all()) + assert ((diffs <= 0) + np.isclose(diffs, 0.0)).all() + def test_convex(hepatitis_X_y): """ @@ -91,7 +94,8 @@ def test_convex(hepatitis_X_y): XX = gam.generate_X_grid(term=0) Y = gam.predict(np.sort(XX)) diffs = np.diff(Y, n=2) - assert(((diffs >= 0) + np.isclose(diffs, 0.)).all()) + assert ((diffs >= 0) + np.isclose(diffs, 0.0)).all() + def test_concave(hepatitis_X_y): """ @@ -105,7 +109,7 @@ def test_concave(hepatitis_X_y): XX = gam.generate_X_grid(term=0) Y = gam.predict(np.sort(XX)) diffs = np.diff(Y, n=2) - assert(((diffs <= 0) + np.isclose(diffs, 0.)).all()) + assert ((diffs <= 0) + np.isclose(diffs, 0.0)).all() # TODO penalties gives expected matrix structure diff --git a/pygam/tests/test_terms.py b/pygam/tests/test_terms.py index e4a5c5a4..46d0f172 100644 --- a/pygam/tests/test_terms.py +++ b/pygam/tests/test_terms.py @@ -5,25 +5,39 @@ import numpy as np import pytest -from pygam import * -from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList +from pygam import LinearGAM, PoissonGAM + +from pygam.terms import ( + Term, + Intercept, + SplineTerm, + LinearTerm, + FactorTerm, + TensorTerm, + TermList, + s, + te, + l, + f, +) from pygam.utils import flatten + @pytest.fixture def chicago_gam(chicago_X_y): X, y = chicago_X_y gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y) return gam + def test_wrong_length(): - """iterable params must all match lengths - """ + """iterable params must all match lengths""" with pytest.raises(ValueError): SplineTerm(0, lam=[0, 1, 2], penalties=['auto', 'auto']) + def test_num_coefs(mcycle_X_y, wage_X_y): - """make sure this 
diff --git a/pygam/tests/test_terms.py b/pygam/tests/test_terms.py
index e4a5c5a4..46d0f172 100644
--- a/pygam/tests/test_terms.py
+++ b/pygam/tests/test_terms.py
@@ -5,25 +5,39 @@
 import numpy as np
 import pytest

-from pygam import *
-from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList
+from pygam import LinearGAM, PoissonGAM
+
+from pygam.terms import (
+    Term,
+    Intercept,
+    SplineTerm,
+    LinearTerm,
+    FactorTerm,
+    TensorTerm,
+    TermList,
+    s,
+    te,
+    l,
+    f,
+)
 from pygam.utils import flatten

+
 @pytest.fixture
 def chicago_gam(chicago_X_y):
     X, y = chicago_X_y
     gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
     return gam

+
 def test_wrong_length():
-    """iterable params must all match lengths
-    """
+    """iterable params must all match lengths"""
     with pytest.raises(ValueError):
         SplineTerm(0, lam=[0, 1, 2], penalties=['auto', 'auto'])

+
 def test_num_coefs(mcycle_X_y, wage_X_y):
-    """make sure this
-    method gives correct values
-    """
+    """make sure this method gives correct values"""
     X, y = mcycle_X_y

     term = Intercept().compile(X)
@@ -35,7 +49,6 @@ def test_num_coefs(mcycle_X_y, wage_X_y):
     term = SplineTerm(0).compile(X)
     assert term.n_coefs == term.n_splines

-
     X, y = wage_X_y
     term = FactorTerm(2).compile(X)
     assert term.n_coefs == 5
@@ -45,6 +58,7 @@ def test_num_coefs(mcycle_X_y, wage_X_y):
     term = TensorTerm(term_a, term_b).compile(X)
     assert term.n_coefs == term_a.n_coefs * term_b.n_coefs

+
 def test_term_list_removes_duplicates():
     """prove that we remove duplicated terms"""
     term = SplineTerm(0)
@@ -53,18 +67,20 @@
     assert isinstance(term_list, TermList)
     assert len(term_list) == 1

+
 def test_tensor_invariance_to_scaling(chicago_gam, chicago_X_y):
-    """a model with tensor terms should give results regardless of input scaling
-    """
+    """a model with tensor terms should give the same results regardless of input scaling"""
     X, y = chicago_X_y
     X[:, 3] = X[:, 3] * 100
     gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
     assert np.allclose(gam.coef_, chicago_gam.coef_, atol=1e-6)

+
 def test_tensor_must_have_at_least_2_marginal_terms():
     with pytest.raises(ValueError):
         te(0)

+
 def test_tensor_term_expands_args_to_match_penalties_and_terms():
     tensor = te(0, 1, lam=3)
     assert len(tensor.lam) == 2
@@ -78,15 +94,18 @@
     assert len(tensor.lam) == 2
     assert len(flatten(tensor.lam)) == 3

+
 def test_tensor_term_skips_kwargs_when_marginal_term_is_supplied():
     tensor = te(0, s(1), n_splines=420)
     assert tensor._terms[0].n_coefs == 420
     assert tensor._terms[1].n_coefs != 420

+
 def test_tensor_term_doesnt_accept_tensor_terms():
     with pytest.raises(ValueError):
         te(l(0), te(0, 1))

+
 def test_tensor_args_length_must_agree_with_number_of_terms():
     with pytest.raises(ValueError):
         te(0, 1, lam=[3])
@@ -97,19 +116,18 @@
     with pytest.raises(ValueError):
         te(0, 1, lam=[3, 3, 3])

+
 def test_build_from_info():
-    """we can rebuild terms from info
-    """
-    terms = [Intercept(),
-             LinearTerm(0),
-             SplineTerm(0),
-             FactorTerm(0),
-             TensorTerm(0,1)]
+    """we can rebuild terms from info"""
+    terms = [Intercept(), LinearTerm(0), SplineTerm(0), FactorTerm(0), TensorTerm(0, 1)]

     for term in terms:
         assert Term.build_from_info(term.info) == term

-    assert te(0, 1) == TensorTerm(SplineTerm(0, n_splines=10), SplineTerm(1, n_splines=10))
+    assert te(0, 1) == TensorTerm(
+        SplineTerm(0, n_splines=10), SplineTerm(1, n_splines=10)
+    )

+
 def test_by_variable():
     """our fit on the toy tensor dataset with a by variable on the linear feature
@@ -118,25 +136,27 @@
     """
     pass

+
 def test_by_variable_doesnt_exist_in_X(mcycle_X_y):
-    """raises a value error if we cannot locate the by variable
-    """
+    """raises a value error if we cannot locate the by variable"""
     term = s(0, by=1)
     with pytest.raises(ValueError):
         term.compile(mcycle_X_y[0])

+
 def test_term_list_from_info():
-    """we can remake a term list from info
-    """
+    """we can remake a term list from info"""
     term_list = SplineTerm(0) + LinearTerm(1)

     assert Term.build_from_info(term_list.info) == term_list

+
 def test_term_list_only_accepts_terms_or_term_list():
     TermList()
     with pytest.raises(ValueError):
         TermList(None)

+
 def test_pop_term_from_term_list():
     term_list = SplineTerm(0) + LinearTerm(1) + Intercept()
     term_list_2 = deepcopy(term_list)
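
# --- illustrative sketch (not part of the patch) ---------------------------
# test_build_from_info and test_term_list_from_info above rely on the info
# round-trip: a term or term list serializes to a plain dict via `.info`,
# and Term.build_from_info() reconstructs an equal object.
from pygam.terms import LinearTerm, SplineTerm, Term

term_list = SplineTerm(0) + LinearTerm(1)       # '+' builds a TermList
rebuilt = Term.build_from_info(term_list.info)  # round-trip through a dict
assert rebuilt == term_list
# ----------------------------------------------------------------------------
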
@@ -149,9 +169,9 @@ def test_pop_term_from_term_list():
     with pytest.raises(ValueError):
         term_list_2.pop(1) == term_list[0]

+
 def test_no_multiply():
-    """trying to multiply terms raises an error
-    """
+    """trying to multiply terms raises an error"""
     with pytest.raises(NotImplementedError):
         SplineTerm(0) * LinearTerm(1)

@@ -159,10 +179,11 @@ def test_no_multiply():
     with pytest.raises(NotImplementedError):
         term_list * term_list

+
 def test_by_is_similar_to_tensor_with_linear_term(toy_interaction_X_y):
     """for simple interactions we can achieve equivalent fits using:
-    - a spline with a by-variable
-    - a tensor between spline and a linear term
+        - a spline with a by-variable
+        - a tensor between spline and a linear term
     """
     X, y = toy_interaction_X_y

@@ -174,6 +195,7 @@ def test_by_is_similar_to_tensor_with_linear_term(toy_interaction_X_y):

     assert np.allclose(r2_a, r2_b)

+
 def test_correct_smoothing_in_tensors(toy_interaction_X_y):
     """check that smoothing penalties are correctly computed across the
     marginal dimensions
@@ -191,6 +213,7 @@ def test_correct_smoothing_in_tensors(toy_interaction_X_y):
     gam = LinearGAM(te(0, 1, lam=[10000, 0.6])).fit(X, y)
     assert gam.statistics_['pseudo_r2']['explained_deviance'] < 0.1

+
 def test_dummy_encoding(wage_X_y, wage_gam):
     """check that dummy encoding produces fewer coefficients than one-hot"""
     X, y = wage_X_y
@@ -203,6 +226,7 @@ def test_dummy_encoding(wage_X_y, wage_gam):
     assert wage_gam._modelmat(X=X, term=2).shape[1] == 5
     assert wage_gam.terms[2].n_coefs == 5

+
 def test_build_cyclic_p_spline(hepatitis_X_y):
     """check the cyclic p spline builds

@@ -220,6 +244,7 @@ def test_build_cyclic_p_spline(hepatitis_X_y):

     assert r_unconstrained > r_cyclic

+
 def test_cyclic_p_spline_periodicity(hepatitis_X_y):
     """check the cyclic p spline behaves periodically

@@ -234,11 +259,12 @@ def test_cyclic_p_spline_periodicity(hepatitis_X_y):
     # check periodicity
     left = gam.edge_knots_[0][0]
     right = gam.edge_knots_[0][1]
-    assert(gam.predict(left) == gam.predict(right))
+    assert gam.predict(left) == gam.predict(right)

     # check extrapolation
     further = right + (right - left)
-    assert(gam.predict(further) == gam.predict(right))
+    assert gam.predict(further) == gam.predict(right)
+

 def test_cyclic_p_spline_custom_period():
     """show that we can set custom edge_knots, and that these affect our model's
@@ -255,10 +281,13 @@ def test_cyclic_p_spline_custom_period():
     assert np.allclose(gam.edge_knots_[0], [0, 1])

     # when modeling a non-periodic function, our periodic model fails
-    gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0, edge_knots=[0, 0.5])).fit(X, y)
+    gam = LinearGAM(
+        s(0, basis='cp', n_splines=4, spline_order=0, edge_knots=[0, 0.5])
+    ).fit(X, y)
     assert np.allclose(gam.predict(X), 0.5)
     assert np.allclose(gam.edge_knots_[0], [0, 0.5])
+
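
# --- illustrative sketch (not part of the patch) ---------------------------
# The periodicity property exercised by the cyclic p-spline tests above, on
# assumed toy data: with basis='cp' the fit agrees at the two edge knots and
# wraps around one full period past the right edge.
import numpy as np
from pygam import LinearGAM, s

rng = np.random.default_rng(1)
X = rng.uniform(0, 1, size=(300, 1))
y = np.sin(2 * np.pi * X[:, 0]) + rng.normal(0, 0.1, size=300)

gam = LinearGAM(s(0, basis='cp')).fit(X, y)
left, right = gam.edge_knots_[0][0], gam.edge_knots_[0][-1]
assert np.allclose(gam.predict([left]), gam.predict([right]), atol=1e-6)
assert np.allclose(
    gam.predict([right + (right - left)]), gam.predict([right]), atol=1e-6
)
# ----------------------------------------------------------------------------
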

 def test_tensor_terms_have_constraints(toy_interaction_X_y):
     """test that we can fit a gam with constrained tensor terms,
     even if those constraints are 'none'
@@ -269,6 +298,7 @@
     assert gam._is_fitted
     assert gam.terms.hasconstraint

+
 def test_tensor_composite_constraints_equal_penalties():
     """check that the composite constraint matrix for a tensor term
     is equivalent to a penalty matrix under the correct conditions
     """
     from pygam.penalties import derivative

     def der1(*args, **kwargs):
-        kwargs.update({'derivative':1})
+        kwargs.update({'derivative': 1})
         return derivative(*args, **kwargs)

     # create a 3D tensor where the penalty should be equal to the constraint
-    term = te(0, 1, 2,
-              n_splines=[4, 5, 6],
-              penalties=der1,
-              lam=1,
-              constraints='monotonic_inc')
+    term = te(
+        0, 1, 2, n_splines=[4, 5, 6], penalties=der1, lam=1, constraints='monotonic_inc'
+    )

     # check all the dimensions
     for i in range(3):
         P = term._build_marginal_penalties(i).A
-        C = term._build_marginal_constraints(i,
-                                             -np.arange(term.n_coefs),
-                                             constraint_lam=1,
-                                             constraint_l2=0).A
+        C = term._build_marginal_constraints(
+            i, -np.arange(term.n_coefs), constraint_lam=1, constraint_l2=0
+        ).A

         assert (P == C).all()

+
 def test_tensor_with_constraints(hepatitis_X_y):
     """we should be able to fit a gam with non-'none' constraints on a tensor term
     and observe its effect in reducing the R2 of the fit
     """
     X, y = hepatitis_X_y
-    X = np.c_[X, np.random.randn(len(X))] # add a random interaction data
+    X = np.c_[X, np.random.randn(len(X))]  # add random interaction data

     # constrain useless dimension
-    gam_useless_constraint = LinearGAM(te(0, 1,
-                                          constraints=['none', 'monotonic_dec'],
-                                          n_splines=[20, 4]))
+    gam_useless_constraint = LinearGAM(
+        te(0, 1, constraints=['none', 'monotonic_dec'], n_splines=[20, 4])
+    )
     gam_useless_constraint.fit(X, y)

     # constrain informative dimension
-    gam_constrained = LinearGAM(te(0, 1,
-                                   constraints=['monotonic_dec', 'none'],
-                                   n_splines=[20, 4]))
+    gam_constrained = LinearGAM(
+        te(0, 1, constraints=['monotonic_dec', 'none'], n_splines=[20, 4])
+    )
     gam_constrained.fit(X, y)

     assert gam_useless_constraint.statistics_['pseudo_r2']['explained_deviance'] > 0.5
     assert gam_constrained.statistics_['pseudo_r2']['explained_deviance'] < 0.1

- 
+

 class TestRegressions(object):
     def test_no_auto_dtype(self):
@@ -344,12 +372,14 @@ def test_compose_penalties(self):

     def test_compose_constraints(self, hepatitis_X_y):
         """we should be able to compose constraints

-        here we show that a gam with a monotonic increasing penalty composed with a monotonic decreasing
-        penalty is equivalent to a gam with only an intercept
+        here we show that a gam with a monotonic increasing penalty composed with a
+        monotonic decreasing penalty is equivalent to a gam with only an intercept
         """
         X, y = hepatitis_X_y

-        gam_compose = LinearGAM(s(0, constraints=['monotonic_inc', 'monotonic_dec'])).fit(X, y)
+        gam_compose = LinearGAM(
+            s(0, constraints=['monotonic_inc', 'monotonic_dec'])
+        ).fit(X, y)
         gam_intercept = LinearGAM(terms=None).fit(X, y)

         assert np.allclose(gam_compose.coef_[-1], gam_intercept.coef_)
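
# --- illustrative sketch (not part of the patch) ---------------------------
# The composition property from test_compose_constraints above, on assumed
# toy data: a term constrained to be both monotonic increasing and monotonic
# decreasing can only be flat, so the fit collapses to its intercept.
import numpy as np
from pygam import LinearGAM, s

rng = np.random.default_rng(2)
X = rng.uniform(0, 1, size=(200, 1))
y = X[:, 0] ** 2 + rng.normal(0, 0.1, size=200)

flat = LinearGAM(s(0, constraints=['monotonic_inc', 'monotonic_dec'])).fit(X, y)
intercept_only = LinearGAM(terms=None).fit(X, y)
assert np.allclose(flat.coef_[-1], intercept_only.coef_)
# ----------------------------------------------------------------------------
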
diff --git a/pygam/tests/test_utils.py b/pygam/tests/test_utils.py
index 39077f80..2a06e86c 100644
--- a/pygam/tests/test_utils.py
+++ b/pygam/tests/test_utils.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-

 from copy import deepcopy
+
 try:
     # py >= 3.3
     from unittest.mock import patch
@@ -11,42 +12,46 @@
 import numpy as np
 import pytest

-from pygam import *
+from pygam import LinearGAM, LogisticGAM, s, f
 from pygam.utils import check_X, check_y, check_X_y, sig_code, check_iterable_depth

 # TODO check dtypes works as expected
 # TODO checkX, checky, check XY expand as needed, call out bad domain

+
 @pytest.fixture
 def wage_gam(wage_X_y):
     X, y = wage_X_y
-    gam = LinearGAM(s(0) + s(1) + f(2)).fit(X,y)
+    gam = LinearGAM(s(0) + s(1) + f(2)).fit(X, y)
     return gam

+
 @pytest.fixture
 def default_gam(default_X_y):
     X, y = default_X_y
-    gam = LogisticGAM().fit(X,y)
+    gam = LogisticGAM().fit(X, y)
     return gam

+
 def test_check_X_categorical_prediction_exceeds_training(wage_X_y, wage_gam):
     """
     if our categorical variable is outside the training range
     we should get an error
     """
-    X, y = wage_X_y # last feature is categorical
+    X, y = wage_X_y  # last feature is categorical
     gam = wage_gam

     # get edge knots for last feature
     eks = gam.edge_knots_[-1]

-    # add 1 to all Xs, thus pushing some X past the max value
-    X[:,-1] = eks[-1] + 1
+    # add 1 to the max, thus pushing the last feature past its training range
+    X[:, -1] = eks[-1] + 1

     with pytest.raises(ValueError):
         gam.predict(X)

+
 def test_check_y_not_int_not_float(wage_X_y, wage_gam):
     """y must be int or float, or we should get a value error"""
     X, y = wage_X_y
@@ -55,6 +60,7 @@ def test_check_y_not_int_not_float(wage_X_y, wage_gam):
     with pytest.raises(ValueError):
         check_y(y_str, wage_gam.link, wage_gam.distribution)

+
 def test_check_y_casts_to_numerical(wage_X_y, wage_gam):
     """check_y will try to cast data to numerical types"""
     X, y = wage_X_y
@@ -69,34 +75,45 @@ def test_check_y_not_min_samples(wage_X_y, wage_gam):
     X, y = wage_X_y

     with pytest.raises(ValueError):
-        check_y(y, wage_gam.link, wage_gam.distribution, min_samples=len(y)+1, verbose=False)
+        check_y(
+            y,
+            wage_gam.link,
+            wage_gam.distribution,
+            min_samples=len(y) + 1,
+            verbose=False,
+        )

+
 def test_check_y_not_in_domain_link(default_X_y, default_gam):
     """if you give labels outside of the link's domain, check_y will raise an error"""
     X, y = default_X_y
-    gam = default_gam

     with pytest.raises(ValueError):
-        check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)
+        check_y(y + 0.1, default_gam.link, default_gam.distribution, verbose=False)

+
 def test_check_X_not_int_not_float():
     """X must be an int or a float"""
     with pytest.raises(ValueError):
         check_X(['hi'], verbose=False)

+
 def test_check_X_too_many_dims():
     """check_X accepts at most 2D inputs"""
     with pytest.raises(ValueError):
-        check_X(np.ones((5,4,3)))
+        check_X(np.ones((5, 4, 3)))

+
 def test_check_X_not_min_samples():
     with pytest.raises(ValueError):
         check_X(np.ones((5)), min_samples=6, verbose=False)

+
 def test_check_X_y_different_lengths():
     with pytest.raises(ValueError):
         check_X_y(np.ones(5), np.ones(4))

+
 def test_input_data_after_fitting(mcycle_X_y):
     """
     our check_X and check_y functions should be invoked
@@ -164,6 +181,7 @@
     with pytest.raises(ValueError):
         gam.sample(X, y, weights=weights_nan, n_bootstraps=2)

+
 def test_catch_chol_pos_def_error(default_X_y):
     """
     regression test

     doing a gridsearch with a poorly conditioned penalty matrix should not crash
     """
     X, y = default_X_y
-    gam = LogisticGAM().gridsearch(X, y, lam=np.logspace(10, 12, 3))
+    LogisticGAM().gridsearch(X, y, lam=np.logspace(10, 12, 3))
+
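
# --- illustrative sketch (not part of the patch) ---------------------------
# The validator behavior exercised above, on assumed toy inputs: check_X
# coerces numeric 1-D input to a 2-D column and enforces sample minimums,
# while check_X_y rejects X and y of different lengths.
import numpy as np
from pygam.utils import check_X, check_X_y

X = check_X(np.arange(10.0), verbose=False)  # 1-D input is expanded to 2-D
assert X.shape == (10, 1)

try:
    check_X_y(np.ones(5), np.ones(4))
except ValueError as err:
    print(err)  # Inconsistent input and output data shapes. found X: (5,) and y: (4,)
# ----------------------------------------------------------------------------
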

 def test_pvalue_sig_codes():
     """make sure we get the codes we expect"""
@@ -184,6 +203,7 @@
     assert sig_code(0.0501) == '.'
     assert sig_code(0.101) == ' '

+
 def test_b_spline_basis_extrapolates(mcycle_X_y):
     X, y = mcycle_X_y
     gam = LinearGAM().fit(X, y)
@@ -204,19 +224,24 @@

     assert np.allclose(slopes[0], slopes[1], atol=1e-4)

+
 def test_iterable_depth():
     it = [[[3]]]
     assert check_iterable_depth(it) == 3
     assert check_iterable_depth(it, max_depth=2) == 2

+
 def test_no_SKSPIMPORT(mcycle_X_y):
-    """make sure our module work with and without scikit-sparse
-    """
+    """make sure our module works with and without scikit-sparse"""
     from pygam.utils import SKSPIMPORT
+
     if SKSPIMPORT:
-        with patch('pygam.utils.SKSPIMPORT', new=False) as SKSPIMPORT_patch:
+        with patch(
+            'pygam.utils.SKSPIMPORT', new=False
+        ) as SKSPIMPORT_patch:  # noqa: E501, F841
             from pygam.utils import SKSPIMPORT
-            assert SKSPIMPORT == False
+
+            assert SKSPIMPORT is False

     X, y = mcycle_X_y
     assert LinearGAM().fit(X, y)._is_fitted
diff --git a/pygam/utils.py b/pygam/utils.py
index ff5ea004..994de394 100644
--- a/pygam/utils.py
+++ b/pygam/utils.py
@@ -9,28 +9,28 @@
 import warnings

 import scipy as sp
-from scipy import sparse
+from scipy import sparse  # noqa: F401
 import numpy as np
 from numpy.linalg import LinAlgError

 try:
-    from sksparse.cholmod import cholesky as spcholesky
-    from sksparse.test_cholmod import CholmodNotPositiveDefiniteError
-    SKSPIMPORT = True
+    from sksparse.cholmod import cholesky as spcholesky
+    from sksparse.test_cholmod import CholmodNotPositiveDefiniteError
+
+    SKSPIMPORT = True
 except ImportError:
-    SKSPIMPORT = False
+    SKSPIMPORT = False


 class NotPositiveDefiniteError(ValueError):
-    """Exception class to raise if a matrix is not positive definite
-    """
+    """Exception class to raise if a matrix is not positive definite"""
+

 class OptimizationError(ValueError):
-    """Exception class to raise if PIRLS optimization fails
-    """
+    """Exception class to raise if PIRLS optimization fails"""


-def cholesky(A, sparse=True, verbose=True):
+def cholesky(A, sparse=True, verbose=True):  # noqa: F811
     """
     Choose the best possible cholesky factorizor.

@@ -61,19 +61,21 @@
             # permute
             L = F.L()
             L = P.T.dot(L)
-        except CholmodNotPositiveDefiniteError as e:
+        except CholmodNotPositiveDefiniteError:
             raise NotPositiveDefiniteError('Matrix is not positive definite')

         if sparse:
-            return L.T # upper triangular factorization
-        return L.T.A # upper triangular factorization
+            return L.T  # upper triangular factorization
+        return L.T.A  # upper triangular factorization

     else:
-        msg = 'Could not import Scikit-Sparse or Suite-Sparse.\n'\
-              'This will slow down optimization for models with '\
-              'monotonicity/convexity penalties and many splines.\n'\
-              'See installation instructions for installing '\
-              'Scikit-Sparse and Suite-Sparse via Conda.'
+        msg = (
+            'Could not import Scikit-Sparse or Suite-Sparse.\n'
+            'This will slow down optimization for models with '
+            'monotonicity/convexity penalties and many splines.\n'
+            'See installation instructions for installing '
+            'Scikit-Sparse and Suite-Sparse via Conda.'
+        )

         if verbose:
             warnings.warn(msg)
@@ -82,7 +84,7 @@

         try:
             L = sp.linalg.cholesky(A, lower=False)
-        except LinAlgError as e:
+        except LinAlgError:
             raise NotPositiveDefiniteError('Matrix is not positive definite')

         if sparse:
@@ -107,16 +109,24 @@ def make_2d(array, verbose=True):
     """
     array = np.asarray(array)
     if array.ndim < 2:
-        msg = 'Expected 2D input data array, but found {}D. 
'\ - 'Expanding to 2D.'.format(array.ndim) + msg = 'Expected 2D input data array, but found {}D. ' 'Expanding to 2D.'.format( + array.ndim + ) if verbose: warnings.warn(msg) - array = np.atleast_1d(array)[:,None] + array = np.atleast_1d(array)[:, None] return array -def check_array(array, force_2d=False, n_feats=None, ndim=None, - min_samples=1, name='Input data', verbose=True): +def check_array( + array, + force_2d=False, + n_feats=None, + ndim=None, + min_samples=1, + name='Input data', + verbose=True, +): """ tool to perform basic data validation. called by check_X and check_y. @@ -160,34 +170,42 @@ def check_array(array, force_2d=False, n_feats=None, ndim=None, if dtype.kind not in ['i', 'f']: try: array = array.astype('float') - except ValueError as e: - raise ValueError('{} must be type int or float, '\ - 'but found type: {}\n'\ - 'Try transforming data with a LabelEncoder first.'\ - .format(name, dtype.type)) + except ValueError: + raise ValueError( + '{} must be type int or float, ' + 'but found type: {}\n' + 'Try transforming data with a LabelEncoder first.'.format( + name, dtype.type + ) + ) # check finite - if not(np.isfinite(array).all()): + if not (np.isfinite(array).all()): raise ValueError('{} must not contain Inf nor NaN'.format(name)) # check ndim if ndim is not None: if array.ndim != ndim: - raise ValueError('{} must have {} dimensions. '\ - 'found shape {}'.format(name, ndim, array.shape)) + raise ValueError( + '{} must have {} dimensions. ' + 'found shape {}'.format(name, ndim, array.shape) + ) # check n_feats if n_feats is not None: m = array.shape[1] if m != n_feats: - raise ValueError('{} must have {} features, '\ - 'but found {}'.format(name, n_feats, m)) + raise ValueError( + '{} must have {} features, ' 'but found {}'.format(name, n_feats, m) + ) # minimum samples n = array.shape[0] if n < min_samples: - raise ValueError('{} should have at least {} samples, '\ - 'but found {}'.format(name, min_samples, n)) + raise ValueError( + '{} should have at least {} samples, ' + 'but found {}'.format(name, min_samples, n) + ) return array @@ -215,22 +233,39 @@ def check_y(y, link, dist, min_samples=1, verbose=True): """ y = np.ravel(y) - y = check_array(y, force_2d=False, min_samples=min_samples, ndim=1, - name='y data', verbose=verbose) + y = check_array( + y, + force_2d=False, + min_samples=min_samples, + ndim=1, + name='y data', + verbose=verbose, + ) with warnings.catch_warnings(): warnings.simplefilter("ignore") - + if np.any(np.isnan(link.link(y, dist))): - raise ValueError('y data is not in domain of {} link function. ' \ - 'Expected domain: {}, but found {}' \ - .format(link, get_link_domain(link, dist), - [float('%.2f'%np.min(y)), - float('%.2f'%np.max(y))])) + raise ValueError( + 'y data is not in domain of {} link function. 
' + 'Expected domain: {}, but found {}'.format( + link, + get_link_domain(link, dist), + [float('%.2f' % np.min(y)), float('%.2f' % np.max(y))], + ) + ) return y -def check_X(X, n_feats=None, min_samples=1, edge_knots=None, dtypes=None, - features=None, verbose=True): + +def check_X( + X, + n_feats=None, + min_samples=1, + edge_knots=None, + dtypes=None, + features=None, + verbose=True, +): """ tool to ensure that X: - is 2 dimensional @@ -269,16 +304,21 @@ def check_X(X, n_feats=None, min_samples=1, edge_knots=None, dtypes=None, n_feats = max(n_feats, max_feat) # basic diagnostics - X = check_array(X, force_2d=True, n_feats=n_feats, min_samples=min_samples, - name='X data', verbose=verbose) + X = check_array( + X, + force_2d=True, + n_feats=n_feats, + min_samples=min_samples, + name='X data', + verbose=verbose, + ) # check our categorical data has no new categories if (edge_knots is not None) and (dtypes is not None) and (features is not None): - # get a flattened list of tuples edge_knots = flatten(edge_knots)[::-1] dtypes = flatten(dtypes) - assert len(edge_knots) % 2 == 0 # sanity check + assert len(edge_knots) % 2 == 0 # sanity check # form pairs n = len(edge_knots) // 2 @@ -293,17 +333,20 @@ def check_X(X, n_feats=None, min_samples=1, edge_knots=None, dtypes=None, if dt == 'categorical': min_ = ek[0] max_ = ek[-1] - if (np.unique(x) < min_).any() or \ - (np.unique(x) > max_).any(): - min_ += .5 + if (np.unique(x) < min_).any() or (np.unique(x) > max_).any(): + min_ += 0.5 max_ -= 0.5 - raise ValueError('X data is out of domain for categorical '\ - 'feature {}. Expected data on [{}, {}], '\ - 'but found data on [{}, {}]'\ - .format(i, min_, max_, x.min(), x.max())) + raise ValueError( + 'X data is out of domain for categorical ' + 'feature {}. Expected data on [{}, {}], ' + 'but found data on [{}, {}]'.format( + i, min_, max_, x.min(), x.max() + ) + ) return X + def check_X_y(X, y): """ tool to ensure input and output data have the same number of samples @@ -318,8 +361,11 @@ def check_X_y(X, y): None """ if len(X) != len(y): - raise ValueError('Inconsistent input and output data shapes. '\ - 'found X: {} and y: {}'.format(X.shape, y.shape)) + raise ValueError( + 'Inconsistent input and output data shapes. 
'
+            'found X: {} and y: {}'.format(X.shape, y.shape)
+        )
+

 def check_lengths(*arrays):
     """
@@ -338,8 +384,7 @@
         raise ValueError('Inconsistent data lengths: {}'.format(lengths))


-def check_param(param, param_name, dtype, constraint=None, iterable=True,
-                max_depth=2):
+def check_param(param, param_name, dtype, constraint=None, iterable=True, max_depth=2):
     """
     checks the dtype of a parameter,
     and whether it satisfies a numerical constraint
@@ -362,10 +407,15 @@
     list of validated and converted parameter(s)
     """
     msg = []
-    msg.append(param_name + " must be "+ dtype)
+    msg.append(param_name + " must be " + dtype)
    if iterable:
-        msg.append(" or nested iterable of depth " + str(max_depth) +
-                   " containing " + dtype + "s")
+        msg.append(
+            " or nested iterable of depth "
+            + str(max_depth)
+            + " containing "
+            + dtype
+            + "s"
+        )

     msg.append(", but found " + param_name + " = {}".format(repr(param)))

@@ -376,7 +426,9 @@

     # check param is numerical
     try:
-        param_dt = np.array(flatten(param))# + np.zeros_like(flatten(param), dtype='int')
+        param_dt = np.array(
+            flatten(param)
+        )  # + np.zeros_like(flatten(param), dtype='int')
         # param_dt = np.array(param).astype(dtype)
     except (ValueError, TypeError):
         raise TypeError(msg)
@@ -399,6 +451,7 @@

     return param

+
 def get_link_domain(link, dist):
     """
     tool to identify the domain of a given monotonic link function
@@ -418,15 +471,15 @@


 def load_diagonal(cov, load=None):
-    """Return the given square matrix with a small amount added to the diagonal
-    to make it positive semi-definite. 
+ """ + n, m = cov.shape + assert n == m, "matrix must be square, but found shape {}".format((n, m)) - if load is None: - load = np.sqrt(np.finfo(np.float64).eps) # machine epsilon - return cov + np.eye(n) * load + if load is None: + load = np.sqrt(np.finfo(np.float64).eps) # machine epsilon + return cov + np.eye(n) * load def round_to_n_decimal_places(array, n=3): @@ -445,17 +498,19 @@ def round_to_n_decimal_places(array, n=3): array : rounded np.array """ # check if in scientific notation - if issubclass(array.__class__, float) and '%.e'%array == str(array): - return array # do nothing + if issubclass(array.__class__, float) and '%.e' % array == str(array): + return array # do nothing shape = np.shape(array) - out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n)) + out = (np.atleast_1d(array) * 10**n).round().astype('int') / (10.0**n) return out.reshape(shape) -# Credit to Hugh Bothwell from http://stackoverflow.com/questions/5084743/how-to-print-pretty-string-output-in-python +# Credit to Hugh Bothwell from +# http://stackoverflow.com/questions/5084743/how-to-print-pretty-string-output-in-python class TablePrinter(object): "Print a list of dicts as a table" + def __init__(self, fmt, sep=' ', ul=None): """ @param fmt: list of tuple(heading, key, width) @@ -464,18 +519,25 @@ def __init__(self, fmt, sep=' ', ul=None): width: int, column width in chars @param sep: string, separation between columns @param ul: string, character to underline column label, or None for no underlining - """ - super(TablePrinter,self).__init__() - self.fmt = str(sep).join('{lb}{0}:{1}{rb}'.format(key, width, lb='{', rb='}') for heading,key,width in fmt) - self.head = {key:heading for heading,key,width in fmt} - self.ul = {key:str(ul)*width for heading,key,width in fmt} if ul else None - self.width = {key:width for heading,key,width in fmt} + """ # noqa: E501 + super(TablePrinter, self).__init__() + self.fmt = str(sep).join( + '{lb}{0}:{1}{rb}'.format(key, width, lb='{', rb='}') + for heading, key, width in fmt + ) + self.head = {key: heading for heading, key, width in fmt} + self.ul = {key: str(ul) * width for heading, key, width in fmt} if ul else None + self.width = {key: width for heading, key, width in fmt} def row(self, data): if sys.version_info < (3,): - return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.iteritems() }) + return self.fmt.format( + **{k: str(data.get(k, ''))[:w] for k, w in self.width.iteritems()} + ) else: - return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.items() }) + return self.fmt.format( + **{k: str(data.get(k, ''))[:w] for k, w in self.width.items()} + ) def __call__(self, dataList): _r = self.row @@ -508,12 +570,13 @@ def space_row(left, right, filler=' ', total_width=-1): filler = str(filler)[:1] if total_width < 0: - spacing = - total_width + spacing = -total_width else: spacing = total_width - len(left) - len(right) return left + filler * spacing + right + def sig_code(p_value): """create a significance code in the style of R's lm @@ -536,6 +599,7 @@ def sig_code(p_value): return '.' return ' ' + def gen_edge_knots(data, dtype, verbose=True): """ generate uniform knots from data including the edges of the data @@ -560,13 +624,23 @@ def gen_edge_knots(data, dtype, verbose=True): else: knots = np.r_[np.min(data), np.max(data)] if knots[0] == knots[1] and verbose: - warnings.warn('Data contains constant feature. 
'\ - 'Consider removing and setting fit_intercept=True', - stacklevel=2) + warnings.warn( + 'Data contains constant feature. ' + 'Consider removing and setting fit_intercept=True', + stacklevel=2, + ) return knots -def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True, - periodic=True, verbose=True): + +def b_spline_basis( + x, + edge_knots, + n_splines=20, + spline_order=3, + sparse=True, # noqa: F811 + periodic=True, + verbose=True, +): """ tool to generate b-spline basis using vectorized De Boor recursion the basis functions extrapolate linearly past the end-knots. @@ -592,8 +666,7 @@ def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True, with shape (len(x), n_splines) """ if np.ravel(x).ndim != 1: - raise ValueError('Data must be 1-D, but found {}'\ - .format(np.ravel(x).ndim)) + raise ValueError('Data must be 1-D, but found {}'.format(np.ravel(x).ndim)) if (n_splines < 1) or not isinstance(n_splines, numbers.Integral): raise ValueError('n_splines must be int >= 1') @@ -602,13 +675,18 @@ def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True, raise ValueError('spline_order must be int >= 1') if n_splines < spline_order + 1: - raise ValueError('n_splines must be >= spline_order + 1. '\ - 'found: n_splines = {} and spline_order = {}'\ - .format(n_splines, spline_order)) + raise ValueError( + 'n_splines must be >= spline_order + 1. ' + 'found: n_splines = {} and spline_order = {}'.format( + n_splines, spline_order + ) + ) if n_splines == 0 and verbose: - warnings.warn('Requested 1 spline. This is equivalent to '\ - 'fitting an intercept', stacklevel=2) + warnings.warn( + 'Requested 1 spline. This is equivalent to ' 'fitting an intercept', + stacklevel=2, + ) n_splines += spline_order * periodic @@ -629,28 +707,24 @@ def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True, x = x % (1 + 1e-9) # append 0 and 1 in order to get derivatives for extrapolation - x = np.r_[x, 0., 1.] + x = np.r_[x, 0.0, 1.0] # determine extrapolation indices - x_extrapolte_l = (x < 0) - x_extrapolte_r = (x > 1) + x_extrapolte_l = x < 0 + x_extrapolte_r = x > 1 x_interpolate = ~(x_extrapolte_r + x_extrapolte_l) # formatting x = np.atleast_2d(x).T - n = len(x) # augment knots aug = np.arange(1, spline_order + 1) * diff - aug_knots = np.r_[-aug[::-1], - boundary_knots, - 1 + aug] - aug_knots[-1] += 1e-9 # want last knot inclusive + aug_knots = np.r_[-aug[::-1], boundary_knots, 1 + aug] + aug_knots[-1] += 1e-9 # want last knot inclusive # prepare Haar Basis - bases = (x >= aug_knots[:-1]).astype(np.int) * \ - (x < aug_knots[1:]).astype(np.int) - bases[-1] = bases[-2][::-1] # force symmetric bases at 0 and 1 + bases = (x >= aug_knots[:-1]).astype(int) * (x < aug_knots[1:]).astype(int) + bases[-1] = bases[-2][::-1] # force symmetric bases at 0 and 1 # do recursion from Hastie et al. 
vectorized
     maxi = len(aug_knots) - 1
@@ -658,15 +732,15 @@
         maxi -= 1

         # left sub-basis
-        num = (x - aug_knots[:maxi])
+        num = x - aug_knots[:maxi]
         num *= bases[:, :maxi]
-        denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]
-        left = num/denom
+        denom = aug_knots[m - 1 : maxi + m - 1] - aug_knots[:maxi]
+        left = num / denom

         # right sub-basis
-        num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]
-        denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]
-        right = num/denom
+        num = (aug_knots[m : maxi + m] - x) * bases[:, 1 : maxi + 1]
+        denom = aug_knots[m : maxi + m] - aug_knots[1 : maxi + 1]
+        right = num / denom

         # track previous bases and update
         prev_bases = bases[-2:]
@@ -674,22 +748,22 @@

     if periodic and spline_order > 0:
         # make spline domain periodic
-        bases[:, :spline_order] = np.max([bases[:, :spline_order],
-                                          bases[:, -spline_order:]],
-                                         axis=0)
+        bases[:, :spline_order] = np.max(
+            [bases[:, :spline_order], bases[:, -spline_order:]], axis=0
+        )

         # remove extra splines used only for ensuring correct domain
         bases = bases[:, :-spline_order]

     # extrapolate
     # since we have repeated end-knots, only the last 2 basis functions are
     # non-zero at the end-knots, and they have equal and opposite gradient.
-    if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order>0:
-        bases[~x_interpolate] = 0.
+    if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order > 0:
+        bases[~x_interpolate] = 0.0

-        denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])
+        denom = aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1]
         left = prev_bases[:, :-1] / denom

-        denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])
+        denom = aug_knots[spline_order + 1 :] - aug_knots[1:-spline_order]
         right = prev_bases[:, 1:] / denom

         grads = (spline_order) * (left - right)
@@ -722,7 +796,7 @@ def ylogydu(y, u):
     -------
     np.array len(n)
     """
-    mask = (np.atleast_1d(y)!=0.)
+    mask = np.atleast_1d(y) != 0.0
     out = np.zeros_like(u)
     out[mask] = y[mask] * np.log(y[mask] / u[mask])
     return out
@@ -754,6 +828,7 @@ def combine(*args):
     else:
         return [[arg] for arg in args[0]]

+
 def isiterable(obj, reject_string=True):
     """convenience tool to detect if something is iterable.
     in python3, strings count as iterables so we have the option to exclude them
@@ -768,13 +843,14 @@
        bool, if the object is iterable. 
""" - iterable = hasattr(obj, '__len__') + iterable = hasattr(obj, '__len__') if reject_string: iterable = iterable and not isinstance(obj, str) return iterable + def check_iterable_depth(obj, max_depth=100): """find the maximum depth of nesting of the iterable @@ -788,6 +864,7 @@ def check_iterable_depth(obj, max_depth=100): ------- int """ + def find_iterables(obj): iterables = [] for item in obj: @@ -801,6 +878,7 @@ def find_iterables(obj): obj = find_iterables(obj) return depth + def flatten(iterable): """convenience tool to flatten any nested iterable @@ -861,8 +939,12 @@ def tensor_product(a, b, reshape=True): or (n, m_a, m_b) otherwise """ - assert a.ndim == 2, 'matrix a must be 2-dimensional, but found {} dimensions'.format(a.ndim) - assert b.ndim == 2, 'matrix b must be 2-dimensional, but found {} dimensions'.format(b.ndim) + assert ( + a.ndim == 2 + ), 'matrix a must be 2-dimensional, but found {} dimensions'.format(a.ndim) + assert ( + b.ndim == 2 + ), 'matrix b must be 2-dimensional, but found {} dimensions'.format(b.ndim) na, ma = a.shape nb, mb = b.shape diff --git a/pylintrc b/pylintrc deleted file mode 100644 index 4529d684..00000000 --- a/pylintrc +++ /dev/null @@ -1,408 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. This option is deprecated -# and it will be removed in Pylint 2.0. -optimize-ast=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -#enable= - -# Disable the message, report, category or checker with the given id(s). 
You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -#disable=backtick,print-statement,intern-builtin,cmp-method,parameter-unpacking,import-star-module-level,zip-builtin-not-iterating,range-builtin-not-iterating,indexing-exception,old-division,useless-suppression,setslice-method,filter-builtin-not-iterating,reload-builtin,buffer-builtin,getslice-method,nonzero-method,suppressed-message,long-suffix,no-absolute-import,raw_input-builtin,xrange-builtin,basestring-builtin,metaclass-assignment,using-cmp-argument,input-builtin,old-ne-operator,apply-builtin,unicode-builtin,round-builtin,coerce-method,dict-iter-method,old-raise-syntax,delslice-method,hex-method,raising-string,old-octal-literal,next-method-called,unichr-builtin,coerce-builtin,map-builtin-not-iterating,oct-method,cmp-builtin,unpacking-in-except,reduce-builtin,standarderror-builtin,long-builtin,execfile-builtin,dict-view-method,file-builtin -disable=no-member,locally-disabled - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". This option is deprecated -# and it will be removed in Pylint 2.0. -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[BASIC] - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,f,x,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. 
-property-classes=abc.abstractproperty - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=100 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[SIMILARITIES] - -# Minimum lines number of a similarity. 
-min-similarity-lines=10 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,future.builtins - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=30 - -# Argument names that match this expression will be ignored. 
Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=45 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=30 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=1 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..119ade98 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,50 @@ +[tool.poetry] +name = "pygam" +version = "0.8.1" +description = "" +authors = ["Daniel Servén Marín", "Charlie Brummitt"] +license = "Apache-2.0" +readme = "README.md" + +[tool.poetry.dependencies] +python = ">=3.8.1, <3.12" +numpy = "^1.24.2" +scipy = "^1.10.1" +progressbar2 = "^4.2.0" + +[tool.poetry.group.dev.dependencies] +pytest = "^7.2.2" +flake8 = "^6.0.0" +codecov = "^2.1.12" +pytest-cov = "^4.0.0" +mock = "^5.0.1" +nbsphinx = "^0.9.0" +sphinx-rtd-theme = "^1.2.0" +sphinxcontrib-napoleon = "^0.7" +ipython = "^8.11.0" +pandas = "^1.5.3" +black = "^23.1.0" + +[tool.black] +line-length = 88 +skip-string-normalization = true +target-version = ['py39'] +include = '\.pyi?$' +exclude = ''' +/( + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist +)/ +''' + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 609b7b93..00000000 --- a/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -codecov -mock -future -numpy -progressbar2 -pylint -pytest -pytest-cov -pytest-pylint -setuptools -scipy>=0.17 - -# Documentation Requirements -sphinxcontrib-napoleon # Parses numpy-style docstrings -nbsphinx # Converts notebooks to reStructuredText -ipython # For syntax highlighting notebooks -sphinx_rtd_theme # The Read The Docs theme