8 changes: 2 additions & 6 deletions .pre-commit-config.yaml
@@ -7,11 +7,7 @@ repos:
       - id: check-yaml
       - id: check-added-large-files
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    # Ruff version.
-    rev: v0.6.7
+    rev: v0.14.10
     hooks:
-      # Run the linter.
-      - id: ruff
-        args: [ --fix ]
-      # Run the formatter.
+      - id: ruff-check
       - id: ruff-format
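The ruff bump (v0.6.7 to v0.14.10) is presumably what drives the assert rewrapping in the Python diffs below: the newer formatter keeps a long assert's condition on one line and parenthesizes the message, instead of splitting the condition across lines; the linter hook id also moves from ruff (with --fix) to ruff-check. A minimal sketch of the two layouts, where the UpsertResponse stand-in and the count_upserts helper are illustrative and not the SDK's actual code:

from dataclasses import dataclass


@dataclass
class UpsertResponse:  # illustrative stand-in, not the SDK class
    upserted_count: int


def count_upserts(response: object) -> int:
    # Old layout (pre-bump formatter): the *condition* was split across lines:
    #     assert isinstance(
    #         response, UpsertResponse
    #     ), "response must be an UpsertResponse"
    # New layout (post-bump formatter): the condition stays on one line and the
    # long *message* is parenthesized, matching the diffs in this PR.
    assert isinstance(response, UpsertResponse), (
        "response must be an UpsertResponse when batching results"
    )
    return response.upserted_count


print(count_upserts(UpsertResponse(upserted_count=3)))  # prints 3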
12 changes: 6 additions & 6 deletions pinecone/db_data/index.py
@@ -464,9 +464,9 @@ def upsert(
             )
             # When batch_size is provided, async_req cannot be True (checked above),
             # so batch_result is always UpsertResponse, not ApplyResult
-            assert isinstance(
-                batch_result, UpsertResponse
-            ), "batch_result must be UpsertResponse when batch_size is provided"
+            assert isinstance(batch_result, UpsertResponse), (
+                "batch_result must be UpsertResponse when batch_size is provided"
+            )
             pbar.update(batch_result.upserted_count)
             # we can't use here pbar.n for the case show_progress=False
             total_upserted += batch_result.upserted_count
@@ -591,9 +591,9 @@ def upsert_from_dataframe(
         last_result = None
         for res in results:
             # upsert_from_dataframe doesn't use async_req, so res is always UpsertResponse
-            assert isinstance(
-                res, UpsertResponse
-            ), "Expected UpsertResponse when not using async_req"
+            assert isinstance(res, UpsertResponse), (
+                "Expected UpsertResponse when not using async_req"
+            )
             upserted_count += res.upserted_count
             last_result = res

12 changes: 6 additions & 6 deletions pinecone/db_data/resources/sync/vector.py
@@ -188,9 +188,9 @@ def upsert(
             )
             # When batch_size is provided, async_req cannot be True (checked above),
             # so batch_result is always UpsertResponse, not ApplyResult
-            assert isinstance(
-                batch_result, UpsertResponse
-            ), "batch_result must be UpsertResponse when batch_size is provided"
+            assert isinstance(batch_result, UpsertResponse), (
+                "batch_result must be UpsertResponse when batch_size is provided"
+            )
             pbar.update(batch_result.upserted_count)
             # we can't use here pbar.n for the case show_progress=False
             total_upserted += batch_result.upserted_count
@@ -289,9 +289,9 @@ def upsert_from_dataframe(
         for res in results:
             # res is always UpsertResponse when not using async_req
             # upsert() doesn't use async_req, so res is always UpsertResponse
-            assert isinstance(
-                res, UpsertResponse
-            ), "Expected UpsertResponse when not using async_req"
+            assert isinstance(res, UpsertResponse), (
+                "Expected UpsertResponse when not using async_req"
+            )
             upserted_count += res.upserted_count
             last_result = res

6 changes: 1 addition & 5 deletions pyproject.toml
@@ -122,7 +122,7 @@ exclude = [

 line-length = 100
 indent-width = 4
-target-version = "8.0.0"
+target-version = "py310"

 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
@@ -156,7 +156,3 @@ docstring-code-line-length = "dynamic"

 # E712 Allow == comparison to True/False
 "tests/**" = ["E712"]
-
-[tool.black]
-line-length = 100
-target-version = ["py310"]
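On the pyproject.toml side, ruff's target-version expects a Python version tag such as "py310"; the previous "8.0.0" reads like a stray package version, so this looks like a configuration fix, and the [tool.black] table is dropped, presumably because ruff-format (already in the pre-commit config) covers formatting. A small, hypothetical sanity check for this kind of slip — tomllib requires Python 3.11+, and the path and regex here are illustrative:

import re
import tomllib  # standard library on Python 3.11+

# Read the ruff table from pyproject.toml and confirm target-version uses the
# "py3XX" form that ruff expects (e.g. "py310"), not a package version string.
with open("pyproject.toml", "rb") as f:
    ruff_cfg = tomllib.load(f).get("tool", {}).get("ruff", {})

tv = ruff_cfg.get("target-version", "")
assert re.fullmatch(r"py3\d{1,2}", tv), f"unexpected ruff target-version: {tv!r}"
print("ruff target-version looks valid:", tv)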
6 changes: 3 additions & 3 deletions tests/integration/rest_asyncio/db/data/test_upsert_sparse.py
@@ -74,8 +74,8 @@ async def test_upsert_with_batch_size_sparse(sparse_index_host, target_namespace
     assert "2" in fetched_vec.vectors
     assert "3" in fetched_vec.vectors

-    assert (
-        fetched_vec._response_info is not None
-    ), "Expected _response_info to be present on fetch response"
+    assert fetched_vec._response_info is not None, (
+        "Expected _response_info to be present on fetch response"
+    )
     logger.info(f"Fetch response info: {fetched_vec._response_info}")
     await asyncio_sparse_idx.close()
26 changes: 18 additions & 8 deletions tests/unit/test_pytest_shard.py
@@ -114,9 +114,9 @@ def test_e():
         # If plugin didn't load (testdir limitation), skip this assertion
         if total_test_count > 0:
             # Plugin worked - verify sharding
-            assert (
-                shard1_count < total_test_count or shard1_count == 0
-            ), "Plugin should filter tests"
+            assert shard1_count < total_test_count or shard1_count == 0, (
+                "Plugin should filter tests"
+            )
             # If we got 0 tests, the plugin might have filtered them all out (unlikely but possible)
             # Or the plugin didn't load - either way, the test logic is sound

@@ -208,7 +208,9 @@ def test_example():
                 "--splits must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )

         result = testdir.runpytest("--splits=-1", "--group=1")
         if result.ret == 3:  # INTERNAL_ERROR
@@ -218,7 +220,9 @@
                 "--splits must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )

     def test_validation_group_must_be_positive(self, testdir):
         """Test that --group must be a positive integer."""
@@ -237,7 +241,9 @@ def test_example():
                 "--group must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )

         result = testdir.runpytest("--splits=3", "--group=-1")
         if result.ret == 3:  # INTERNAL_ERROR
@@ -247,7 +253,9 @@
                 "--group must be a positive integer" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )

     def test_validation_group_cannot_exceed_splits(self, testdir):
         """Test that --group cannot exceed --splits."""
@@ -266,7 +274,9 @@ def test_example():
                 "--group (4) must be between 1 and --splits (3)" in stderr_text
                 or "unrecognized arguments" in stderr_text
                 or "INTERNALERROR" in stderr_text
-            ), f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            ), (
+                f"Expected validation error, unrecognized args, or internal error, got: {stderr_text[:200]}"
+            )

     def test_plugin_inactive_without_splits(self, testdir):
         """Test that plugin doesn't filter tests when --splits is not provided."""
18 changes: 9 additions & 9 deletions tests/unit_grpc/test_runner.py
@@ -23,9 +23,9 @@ def test_run_with_default_metadata(self, mocker):
         assert ("client-version", CLIENT_VERSION) in passed_metadata

         # Request id assigned for each request
-        assert any(
-            item[0] == "request_id" for item in passed_metadata
-        ), "request_id not found in metadata"
+        assert any(item[0] == "request_id" for item in passed_metadata), (
+            "request_id not found in metadata"
+        )
         for items in passed_metadata:
             if items[0] == "request_id":
                 assert isinstance(items[1], str)
@@ -47,9 +47,9 @@ def test_each_run_gets_unique_request_id(self, mocker):
         for items in mock_func.call_args.kwargs["metadata"]:
             if items[0] == "request_id":
                 second_request_id = items[1]
-        assert (
-            second_request_id != first_request_id
-        ), "request_id is not unique for each request"
+        assert second_request_id != first_request_id, (
+            "request_id is not unique for each request"
+        )

     def test_run_with_additional_metadata_from_grpc_config(self, mocker):
         config = Config(api_key="YOUR_API_KEY")
@@ -89,9 +89,9 @@ def test_with_additional_metadata_from_run(self, mocker):
         assert ("service-name", "my-index") in passed_metadata
         assert ("client-version", CLIENT_VERSION) in passed_metadata
         # Request id
-        assert any(
-            item[0] == "request_id" for item in passed_metadata
-        ), "request_id not found in metadata"
+        assert any(item[0] == "request_id" for item in passed_metadata), (
+            "request_id not found in metadata"
+        )
         # Extras from configuration
         assert ("debug-header", "value123") in passed_metadata
         assert ("debug-header2", "value456") in passed_metadata
2 changes: 1 addition & 1 deletion uv.lock

Generated lockfile; diff not rendered.