From 47c7b1704e8a7a986eb2199c898844dea0894280 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 16 Feb 2022 07:19:54 -0500
Subject: [PATCH] [pre-commit.ci] pre-commit autoupdate (#416)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/psf/black: 21.12b0 → 22.1.0](https://github.com/psf/black/compare/21.12b0...22.1.0)
- [github.com/dfm/black_nbconvert: v0.3.0 → v0.4.0](https://github.com/dfm/black_nbconvert/compare/v0.3.0...v0.4.0)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update pyproject.toml

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Dan Foreman-Mackey
---
 .pre-commit-config.yaml                        | 4 ++--
 docs/tutorials/autocorr.ipynb                  | 2 +-
 docs/tutorials/line.ipynb                      | 6 +++---
 docs/tutorials/monitor.ipynb                   | 4 ++--
 docs/tutorials/parallel.ipynb                  | 6 +++---
 docs/tutorials/quickstart.ipynb                | 2 +-
 pyproject.toml                                 | 1 -
 src/emcee/ensemble.py                          | 6 +++---
 src/emcee/tests/integration/test_longdouble.py | 2 +-
 src/emcee/tests/integration/test_proposal.py   | 4 ++--
 src/emcee/tests/unit/test_backends.py          | 2 +-
 src/emcee/tests/unit/test_blobs.py             | 2 +-
 src/emcee/tests/unit/test_sampler.py           | 4 ++--
 src/emcee/tests/unit/test_state.py             | 2 +-
 14 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7d9b136d..069cfd07 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,11 +16,11 @@ repos:
             exclude: docs/tutorials

     - repo: https://github.com/psf/black
-      rev: "21.12b0"
+      rev: "22.1.0"
       hooks:
           - id: black

     - repo: https://github.com/dfm/black_nbconvert
-      rev: v0.3.0
+      rev: v0.4.0
       hooks:
           - id: black_nbconvert
diff --git a/docs/tutorials/autocorr.ipynb b/docs/tutorials/autocorr.ipynb
index 3a59eb03..8ac2d520 100644
--- a/docs/tutorials/autocorr.ipynb
+++ b/docs/tutorials/autocorr.ipynb
@@ -450,7 +450,7 @@
     "\n",
     "\n",
     "def log_prob(p):\n",
-    "    return np.logaddexp(-0.5 * np.sum(p ** 2), -0.5 * np.sum((p - 4.0) ** 2))\n",
+    "    return np.logaddexp(-0.5 * np.sum(p**2), -0.5 * np.sum((p - 4.0) ** 2))\n",
     "\n",
     "\n",
     "sampler = emcee.EnsembleSampler(32, 3, log_prob)\n",
diff --git a/docs/tutorials/line.ipynb b/docs/tutorials/line.ipynb
index 112a5570..47aa3b29 100644
--- a/docs/tutorials/line.ipynb
+++ b/docs/tutorials/line.ipynb
@@ -148,9 +148,9 @@
    "source": [
     "A = np.vander(x, 2)\n",
     "C = np.diag(yerr * yerr)\n",
-    "ATA = np.dot(A.T, A / (yerr ** 2)[:, None])\n",
+    "ATA = np.dot(A.T, A / (yerr**2)[:, None])\n",
     "cov = np.linalg.inv(ATA)\n",
-    "w = np.linalg.solve(ATA, np.dot(A.T, y / yerr ** 2))\n",
+    "w = np.linalg.solve(ATA, np.dot(A.T, y / yerr**2))\n",
     "print(\"Least-squares estimates:\")\n",
     "print(\"m = {0:.3f} ± {1:.3f}\".format(w[0], np.sqrt(cov[0, 0])))\n",
     "print(\"b = {0:.3f} ± {1:.3f}\".format(w[1], np.sqrt(cov[1, 1])))\n",
@@ -218,7 +218,7 @@
     "def log_likelihood(theta, x, y, yerr):\n",
     "    m, b, log_f = theta\n",
     "    model = m * x + b\n",
-    "    sigma2 = yerr ** 2 + model ** 2 * np.exp(2 * log_f)\n",
+    "    sigma2 = yerr**2 + model**2 * np.exp(2 * log_f)\n",
     "    return -0.5 * np.sum((y - model) ** 2 / sigma2 + np.log(sigma2))"
    ]
   },
diff --git a/docs/tutorials/monitor.ipynb b/docs/tutorials/monitor.ipynb
index ce52dbc2..f0cbeca4 100644
--- a/docs/tutorials/monitor.ipynb
+++ b/docs/tutorials/monitor.ipynb
@@ -58,7 +58,7 @@
     "# We'll also use the \"blobs\" feature to track the \"log prior\" for each step\n",
     "def log_prob(theta):\n",
     "    log_prior = -0.5 * np.sum((theta - 1.0) ** 2 / 100.0)\n",
-    "    log_prob = -0.5 * np.sum(theta ** 2) + log_prior\n",
+    "    log_prob = -0.5 * np.sum(theta**2) + log_prior\n",
     "    return log_prob, log_prior\n",
     "\n",
     "\n",
@@ -341,7 +341,7 @@
     "# this time, with a subtly different prior\n",
     "def log_prob2(theta):\n",
     "    log_prior = -0.5 * np.sum((theta - 2.0) ** 2 / 100.0)\n",
-    "    log_prob = -0.5 * np.sum(theta ** 2) + log_prior\n",
+    "    log_prob = -0.5 * np.sum(theta**2) + log_prior\n",
     "    return log_prob, log_prior\n",
     "\n",
     "\n",
diff --git a/docs/tutorials/parallel.ipynb b/docs/tutorials/parallel.ipynb
index 020fe840..cb2296ba 100644
--- a/docs/tutorials/parallel.ipynb
+++ b/docs/tutorials/parallel.ipynb
@@ -85,7 +85,7 @@
     "    while True:\n",
     "        if time.time() >= t:\n",
     "            break\n",
-    "    return -0.5 * np.sum(theta ** 2)"
+    "    return -0.5 * np.sum(theta**2)"
    ]
   },
   {
@@ -348,7 +348,7 @@
     "    while True:\n",
     "        if time.time() >= t:\n",
     "            break\n",
-    "    return -0.5 * np.sum(theta ** 2)\n",
+    "    return -0.5 * np.sum(theta**2)\n",
     "\n",
     "\n",
     "data = np.random.randn(5000, 200)\n",
@@ -459,7 +459,7 @@
     "    while True:\n",
     "        if time.time() >= t:\n",
     "            break\n",
-    "    return -0.5 * np.sum(theta ** 2)\n",
+    "    return -0.5 * np.sum(theta**2)\n",
     "\n",
     "\n",
     "with Pool() as pool:\n",
diff --git a/docs/tutorials/quickstart.ipynb b/docs/tutorials/quickstart.ipynb
index 60a1b2d0..9822a1c7 100644
--- a/docs/tutorials/quickstart.ipynb
+++ b/docs/tutorials/quickstart.ipynb
@@ -101,7 +101,7 @@
     "np.random.seed(42)\n",
     "means = np.random.rand(ndim)\n",
     "\n",
-    "cov = 0.5 - np.random.rand(ndim ** 2).reshape((ndim, ndim))\n",
+    "cov = 0.5 - np.random.rand(ndim**2).reshape((ndim, ndim))\n",
     "cov = np.triu(cov)\n",
     "cov += cov.T - np.diag(cov.diagonal())\n",
     "cov = np.dot(cov, cov)"
diff --git a/pyproject.toml b/pyproject.toml
index 31d44d82..6e314f05 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,6 @@ build-backend = "setuptools.build_meta"

 [tool.black]
 line-length = 79
-target-version = ['py35']
 exclude = '''
 /(
     \.eggs
diff --git a/src/emcee/ensemble.py b/src/emcee/ensemble.py
index 2ee10790..a83d0620 100644
--- a/src/emcee/ensemble.py
+++ b/src/emcee/ensemble.py
@@ -642,7 +642,7 @@ def walkers_independent(coords):
     if np.any(C_colmax == 0):
         return False
     C /= C_colmax
-    C_colsum = np.sqrt(np.sum(C ** 2, axis=0))
+    C_colsum = np.sqrt(np.sum(C**2, axis=0))
     C /= C_colsum
     return np.linalg.cond(C.astype(float)) <= 1e8

@@ -655,11 +655,11 @@ def walkers_independent_cov(coords):


 def _scaled_cond(a):
-    asum = np.sqrt((a ** 2).sum(axis=0))[None, :]
+    asum = np.sqrt((a**2).sum(axis=0))[None, :]
     if np.any(asum == 0):
         return np.inf
     b = a / asum
-    bsum = np.sqrt((b ** 2).sum(axis=1))[:, None]
+    bsum = np.sqrt((b**2).sum(axis=1))[:, None]
     if np.any(bsum == 0):
         return np.inf
     c = b / bsum
diff --git a/src/emcee/tests/integration/test_longdouble.py b/src/emcee/tests/integration/test_longdouble.py
index c7fecf13..a3b60d4a 100644
--- a/src/emcee/tests/integration/test_longdouble.py
+++ b/src/emcee/tests/integration/test_longdouble.py
@@ -7,7 +7,7 @@

 def test_longdouble_doesnt_crash_bug_312():
     def log_prob(x, ivar):
-        return -0.5 * np.sum(ivar * x ** 2)
+        return -0.5 * np.sum(ivar * x**2)

     ndim, nwalkers = 5, 20
     ivar = 1.0 / np.random.rand(ndim).astype(np.longdouble)
diff --git a/src/emcee/tests/integration/test_proposal.py b/src/emcee/tests/integration/test_proposal.py
index 8745ca86..0427af7b 100644
--- a/src/emcee/tests/integration/test_proposal.py
+++ b/src/emcee/tests/integration/test_proposal.py
@@ -15,11 +15,11 @@


 def normal_log_prob_blobs(params):
-    return -0.5 * np.sum(params ** 2), params
+    return -0.5 * np.sum(params**2), params


 def normal_log_prob(params):
-    return -0.5 * np.sum(params ** 2)
+    return -0.5 * np.sum(params**2)


 def uniform_log_prob(params):
diff --git a/src/emcee/tests/unit/test_backends.py b/src/emcee/tests/unit/test_backends.py
index adece3b2..a55f2c58 100644
--- a/src/emcee/tests/unit/test_backends.py
+++ b/src/emcee/tests/unit/test_backends.py
@@ -23,7 +23,7 @@


 def normal_log_prob(params):
-    return -0.5 * np.sum(params ** 2)
+    return -0.5 * np.sum(params**2)


 def normal_log_prob_blobs(params):
diff --git a/src/emcee/tests/unit/test_blobs.py b/src/emcee/tests/unit/test_blobs.py
index 61b9e08d..e275f2c2 100644
--- a/src/emcee/tests/unit/test_blobs.py
+++ b/src/emcee/tests/unit/test_blobs.py
@@ -15,7 +15,7 @@ def __init__(self, blob_function):
         self.blob_function = blob_function

     def __call__(self, params):
-        return -0.5 * np.sum(params ** 2), self.blob_function(params)
+        return -0.5 * np.sum(params**2), self.blob_function(params)


 @pytest.mark.parametrize("backend", backends.get_test_backends())
diff --git a/src/emcee/tests/unit/test_sampler.py b/src/emcee/tests/unit/test_sampler.py
index 6e04a182..f272d4ac 100644
--- a/src/emcee/tests/unit/test_sampler.py
+++ b/src/emcee/tests/unit/test_sampler.py
@@ -14,7 +14,7 @@


 def normal_log_prob(params):
-    return -0.5 * np.sum(params ** 2)
+    return -0.5 * np.sum(params**2)


 @pytest.mark.parametrize(
@@ -211,7 +211,7 @@ def test_restart(backend):

 def test_vectorize():
     def lp_vec(p):
-        return -0.5 * np.sum(p ** 2, axis=1)
+        return -0.5 * np.sum(p**2, axis=1)

     np.random.seed(42)
     nwalkers, ndim = 32, 3
diff --git a/src/emcee/tests/unit/test_state.py b/src/emcee/tests/unit/test_state.py
index 44c540e8..9ecaa838 100644
--- a/src/emcee/tests/unit/test_state.py
+++ b/src/emcee/tests/unit/test_state.py
@@ -31,7 +31,7 @@ def test_overwrite(seed=1234):
     np.random.seed(seed)

     def ll(x):
-        return -0.5 * np.sum(x ** 2)
+        return -0.5 * np.sum(x**2)

     nwalkers = 64
     p0 = np.random.normal(size=(nwalkers, 1))
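
Reviewer note on the formatting churn above: black 22.1.0 changed how it spaces the power operator, hugging `**` when both operands are simple (bare names or numeric literals) while keeping spaces when an operand is a larger expression. That one rule accounts for every Python and notebook change in this patch; the remaining hunks are the hook revision bumps in .pre-commit-config.yaml and the removal of the stale `target-version = ['py35']` pin, after which black auto-detects target versions per file. A minimal sketch of the rule, reusing the `log_prob` from docs/tutorials/autocorr.ipynb (self-contained, assumes only numpy is installed):

```python
import numpy as np


def log_prob(p):
    # Simple operands hug the operator under black 22.1.0: p**2, not p ** 2.
    # A parenthesized expression keeps its spaces: (p - 4.0) ** 2.
    return np.logaddexp(-0.5 * np.sum(p**2), -0.5 * np.sum((p - 4.0) ** 2))


print(log_prob(np.zeros(3)))
```

Running `pre-commit run --all-files` with the updated hook revisions should reproduce the same reformatting locally.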