diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 34d137d2d..549668530 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,11 +7,7 @@ repos:
       - id: check-yaml
       - id: check-added-large-files
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    # Ruff version.
-    rev: v0.6.7
+    rev: v0.14.10
     hooks:
-      # Run the linter.
-      - id: ruff
-        args: [ --fix ]
-      # Run the formatter.
+      - id: ruff-check
       - id: ruff-format
diff --git a/pinecone/db_data/index.py b/pinecone/db_data/index.py
index 460e9b37f..537dac94b 100644
--- a/pinecone/db_data/index.py
+++ b/pinecone/db_data/index.py
@@ -464,9 +464,9 @@ def upsert(
             )
             # When batch_size is provided, async_req cannot be True (checked above),
             # so batch_result is always UpsertResponse, not ApplyResult
-            assert isinstance(
-                batch_result, UpsertResponse
-            ), "batch_result must be UpsertResponse when batch_size is provided"
+            assert isinstance(batch_result, UpsertResponse), (
+                "batch_result must be UpsertResponse when batch_size is provided"
+            )
             pbar.update(batch_result.upserted_count)
             # we can't use here pbar.n for the case show_progress=False
             total_upserted += batch_result.upserted_count
@@ -591,9 +591,9 @@ def upsert_from_dataframe(
         last_result = None
         for res in results:
             # upsert_from_dataframe doesn't use async_req, so res is always UpsertResponse
-            assert isinstance(
-                res, UpsertResponse
-            ), "Expected UpsertResponse when not using async_req"
+            assert isinstance(res, UpsertResponse), (
+                "Expected UpsertResponse when not using async_req"
+            )
             upserted_count += res.upserted_count
             last_result = res
 
diff --git a/pinecone/db_data/resources/sync/vector.py b/pinecone/db_data/resources/sync/vector.py
index 7c657321b..91ea4d76d 100644
--- a/pinecone/db_data/resources/sync/vector.py
+++ b/pinecone/db_data/resources/sync/vector.py
@@ -188,9 +188,9 @@ def upsert(
             )
             # When batch_size is provided, async_req cannot be True (checked above),
             # so batch_result is always UpsertResponse, not ApplyResult
-            assert isinstance(
-                batch_result, UpsertResponse
-            ), "batch_result must be UpsertResponse when batch_size is provided"
+            assert isinstance(batch_result, UpsertResponse), (
+                "batch_result must be UpsertResponse when batch_size is provided"
+            )
             pbar.update(batch_result.upserted_count)
             # we can't use here pbar.n for the case show_progress=False
             total_upserted += batch_result.upserted_count
@@ -289,9 +289,9 @@ def upsert_from_dataframe(
         for res in results:
             # res is always UpsertResponse when not using async_req
             # upsert() doesn't use async_req, so res is always UpsertResponse
-            assert isinstance(
-                res, UpsertResponse
-            ), "Expected UpsertResponse when not using async_req"
+            assert isinstance(res, UpsertResponse), (
+                "Expected UpsertResponse when not using async_req"
+            )
             upserted_count += res.upserted_count
             last_result = res
 
diff --git a/pyproject.toml b/pyproject.toml
index 53296c761..0436358a5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -122,7 +122,7 @@ exclude = [
 line-length = 100
 indent-width = 4
 
-target-version = "8.0.0"
+target-version = "py310"
 
 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
@@ -156,7 +156,3 @@ docstring-code-line-length = "dynamic"
 
 # E712 Allow == comparison to True/False
 "tests/**" = ["E712"]
-
-[tool.black]
-line-length = 100
-target-version = ["py310"]
diff --git a/tests/integration/rest_asyncio/db/data/test_upsert_sparse.py b/tests/integration/rest_asyncio/db/data/test_upsert_sparse.py
index ba73f39e3..77e786ee5 100644
--- a/tests/integration/rest_asyncio/db/data/test_upsert_sparse.py
+++ b/tests/integration/rest_asyncio/db/data/test_upsert_sparse.py
@@ -74,8 +74,8 @@ async def test_upsert_with_batch_size_sparse(sparse_index_host, target_namespace
     assert "2" in fetched_vec.vectors
     assert "3" in fetched_vec.vectors
-    assert (
-        fetched_vec._response_info is not None
-    ), "Expected _response_info to be present on fetch response"
+    assert fetched_vec._response_info is not None, (
+        "Expected _response_info to be present on fetch response"
+    )
     logger.info(f"Fetch response info: {fetched_vec._response_info}")
 
     await asyncio_sparse_idx.close()
diff --git a/tests/unit/test_pytest_shard.py b/tests/unit/test_pytest_shard.py
index 30292740a..fd54fbf6f 100644
--- a/tests/unit/test_pytest_shard.py
+++ b/tests/unit/test_pytest_shard.py
@@ -114,9 +114,9 @@ def test_e():
         # If plugin didn't load (testdir limitation), skip this assertion
         if total_test_count > 0:
             # Plugin worked - verify sharding
-            assert (
-                shard1_count < total_test_count or shard1_count == 0
-            ), "Plugin should filter tests"
+            assert shard1_count < total_test_count or shard1_count == 0, (
+                "Plugin should filter tests"
+            )
         # If we got 0 tests, the plugin might have filtered them all out (unlikely but possible)
         # Or the plugin didn't load - either way, the test logic is sound
 
@@ -208,7 +208,9 @@ def test_example():
                 "--splits must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )
 
         result = testdir.runpytest("--splits=-1", "--group=1")
         if result.ret == 3:  # INTERNAL_ERROR
@@ -218,7 +220,9 @@ def test_example():
                 "--splits must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )
 
     def test_validation_group_must_be_positive(self, testdir):
         """Test that --group must be a positive integer."""
@@ -237,7 +241,9 @@ def test_example():
                 "--group must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )
 
         result = testdir.runpytest("--splits=3", "--group=-1")
         if result.ret == 3:  # INTERNAL_ERROR
@@ -247,7 +253,9 @@ def test_example():
                 "--group must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )
 
     def test_validation_group_cannot_exceed_splits(self, testdir):
         """Test that --group cannot exceed --splits."""
@@ -266,7 +274,9 @@ def test_example():
                 "--group (4) must be between 1 and --splits (3)" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )
 
     def test_plugin_inactive_without_splits(self, testdir):
         """Test that plugin doesn't filter tests when --splits is not provided."""
diff --git a/tests/unit_grpc/test_runner.py b/tests/unit_grpc/test_runner.py
index 7a7670c84..f66362ccb 100644
--- a/tests/unit_grpc/test_runner.py
+++ b/tests/unit_grpc/test_runner.py
@@ -23,9 +23,9 @@ def test_run_with_default_metadata(self, mocker):
         assert ("client-version", CLIENT_VERSION) in passed_metadata
 
         # Request id assigned for each request
-        assert any(
-            item[0] == "request_id" for item in passed_metadata
-        ), "request_id not found in metadata"
+        assert any(item[0] == "request_id" for item in passed_metadata), (
+            "request_id not found in metadata"
+        )
         for items in passed_metadata:
             if items[0] == "request_id":
                 assert isinstance(items[1], str)
@@ -47,9 +47,9 @@ def test_each_run_gets_unique_request_id(self, mocker):
         for items in mock_func.call_args.kwargs["metadata"]:
             if items[0] == "request_id":
                 second_request_id = items[1]
-        assert (
-            second_request_id != first_request_id
-        ), "request_id is not unique for each request"
+        assert second_request_id != first_request_id, (
+            "request_id is not unique for each request"
+        )
 
     def test_run_with_additional_metadata_from_grpc_config(self, mocker):
         config = Config(api_key="YOUR_API_KEY")
@@ -89,9 +89,9 @@ def test_with_additional_metadata_from_run(self, mocker):
         assert ("service-name", "my-index") in passed_metadata
         assert ("client-version", CLIENT_VERSION) in passed_metadata
         # Request id
-        assert any(
-            item[0] == "request_id" for item in passed_metadata
-        ), "request_id not found in metadata"
+        assert any(item[0] == "request_id" for item in passed_metadata), (
+            "request_id not found in metadata"
+        )
         # Extras from configuration
         assert ("debug-header", "value123") in passed_metadata
         assert ("debug-header2", "value456") in passed_metadata
diff --git a/uv.lock b/uv.lock
index 8ba4bbf13..9a8defcb9 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1580,7 +1580,7 @@ wheels = [
 
 [[package]]
 name = "pinecone"
-version = "7.3.0"
+version = "8.0.0"
 source = { editable = "." }
 dependencies = [
     { name = "certifi" },