Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 18 additions & 18 deletions src/navigate/tools/main_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,18 +99,18 @@ def evaluate_parser_input_arguments(args):
configuration_path = args.config_file

if args.experiment_file:
assert (
args.experiment_file.exists()
), "experiment_file file Path {} not valid".format(args.experiment_file)
assert args.experiment_file.exists(), (
"experiment_file file Path {} not valid".format(args.experiment_file)
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The assertion message has a duplicated word: "experiment_file file Path ...". This reads like a typo and makes CLI failures less clear; consider changing it to something like "experiment_file Path ... not valid" for consistency with the other messages.

Suggested change
"experiment_file file Path {} not valid".format(args.experiment_file)
"experiment_file Path {} not valid".format(args.experiment_file)

Copilot uses AI. Check for mistakes.
)
experiment_path = args.experiment_file

if args.waveform_constants_file:
assert (
args.waveform_constants_path.exists()
), "waveform_constants_path Path {} not valid".format(
args.waveform_constants_path
assert args.waveform_constants_file.exists(), (
"waveform_constants_file Path {} not valid".format(
args.waveform_constants_file
)
)
waveform_constants_path = args.waveform_constants_path
waveform_constants_path = args.waveform_constants_file
Comment on lines 101 to +113
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Input validation here relies on assert statements. Assertions are stripped when Python runs with optimizations (-O), which would disable these checks in some deployments. Prefer raising a real exception (e.g., FileNotFoundError/ValueError) or calling parser.error(...) during argument parsing so invalid paths are always rejected.

Copilot uses AI. Check for mistakes.

if args.rest_api_file:
assert args.rest_api_file.exists(), "rest_api_file Path {} not valid".format(
Expand All @@ -119,21 +119,21 @@ def evaluate_parser_input_arguments(args):
rest_api_path = args.rest_api_file

if args.waveform_templates_file:
assert (
args.waveform_templates_file.exists()
), "waveform_templates Path {} not valid".format(args.waveform_templates_file)
assert args.waveform_templates_file.exists(), (
"waveform_templates Path {} not valid".format(args.waveform_templates_file)
)
waveform_templates_path = args.waveform_templates_file

if args.gui_config_file:
assert (
args.gui_config_file.exists()
), "gui_configuration Path {} not valid".format(args.gui_config_file)
assert args.gui_config_file.exists(), (
"gui_configuration Path {} not valid".format(args.gui_config_file)
)
gui_configuration_path = args.gui_config_file

if args.multi_positions_file:
assert (
args.multi_positions_file.exists()
), "multi_positions Path {} not valid".format(args.multi_positions_file)
assert args.multi_positions_file.exists(), (
"multi_positions Path {} not valid".format(args.multi_positions_file)
)
multi_positions_path = args.multi_positions_file

# Creating Loggers etc., they exist globally so no need to pass
Expand Down Expand Up @@ -177,7 +177,7 @@ def create_parser() -> argparse.ArgumentParser:
required=False,
default=False,
action="store_true",
help="Configurator - " "GUI for preparing a configuration.yaml file..",
help="Configurator - GUI for preparing a configuration.yaml file..",
)

input_args.add_argument(
Expand Down
22 changes: 22 additions & 0 deletions test/log_files/test_filters.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import logging

import pytest

from navigate.log_files.filters import NonPerfFilter, PerformanceFilter


@pytest.mark.parametrize(
    ("levelname", "perf_expected", "non_perf_expected"),
    [
        ("PERFORMANCE", True, False),
        ("INFO", False, True),
        ("DEBUG", False, True),
    ],
)
def test_filters_route_performance_records_by_level(
    levelname, perf_expected, non_perf_expected
):
    """Each filter admits or rejects a record purely by its level name."""
    log_record = logging.makeLogRecord(
        {"levelname": levelname, "msg": "test message"}
    )

    perf_result = PerformanceFilter().filter(log_record)
    non_perf_result = NonPerfFilter().filter(log_record)

    assert perf_result is perf_expected
    assert non_perf_result is non_perf_expected
95 changes: 95 additions & 0 deletions test/log_files/test_log_functions_additional.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import json
import logging
import multiprocessing as mp
from datetime import datetime, timedelta

from navigate.log_files.log_functions import (
PERFORMANCE,
eliminate_old_log_files,
find_filename,
get_folder_date,
load_performance_log,
log_setup,
)


def test_find_filename_only_matches_filename_key():
    """find_filename reports True for the "filename" key and False otherwise."""
    matched = find_filename("filename", "debug.log")
    unmatched = find_filename("level", "DEBUG")

    assert matched is True
    assert unmatched is False


def test_get_folder_date_parses_valid_names_and_rejects_invalid_names():
    """A timestamp-named folder parses to a datetime; other names yield False."""
    parsed = get_folder_date("2026-03-21-1530")
    rejected = get_folder_date("not-a-date")

    assert parsed == datetime(2026, 3, 21, 15, 30)
    assert rejected is False


def test_eliminate_old_log_files_removes_only_expired_timestamp_dirs(tmp_path):
    """Only timestamp-named directories older than the cutoff are deleted."""
    stamp_format = "%Y-%m-%d-%H%M"
    expired = tmp_path / (datetime.now() - timedelta(days=31)).strftime(stamp_format)
    current = tmp_path / datetime.now().strftime(stamp_format)
    invalid = tmp_path / "keep-me"

    for directory in (expired, current, invalid):
        directory.mkdir()

    eliminate_old_log_files(tmp_path)

    # The expired timestamp dir is gone; fresh and non-timestamp dirs survive.
    assert not expired.exists()
    assert current.exists()
    assert invalid.exists()


def test_load_performance_log_reads_latest_valid_log_and_skips_invalid_json(
    tmp_path, monkeypatch
):
    """load_performance_log reads only the newest timestamp directory.

    Hidden directories are ignored, and unparseable lines inside the chosen
    performance.log are skipped rather than raising.
    """
    logs_dir = tmp_path / "logs"
    older_dir = logs_dir / "2026-03-20-1200"
    latest_dir = logs_dir / "2026-03-21-1200"
    hidden_dir = logs_dir / ".ignored"

    older_dir.mkdir(parents=True)
    latest_dir.mkdir()
    hidden_dir.mkdir()

    # The older directory's log would yield {"run": "older"}; it must be skipped.
    (older_dir / "performance.log").write_text(json.dumps({"run": "older"}) + "\n")
    # The latest log mixes valid JSON lines with one non-JSON line.
    (latest_dir / "performance.log").write_text(
        json.dumps({"run": "latest"}) + "\nnot-json\n" + json.dumps({"step": 2}) + "\n"
    )

    # Redirect the log root to the temp directory instead of the real install path.
    monkeypatch.setattr(
        "navigate.log_files.log_functions.get_navigate_path", lambda: tmp_path
    )

    assert load_performance_log() == [{"run": "latest"}, {"step": 2}]


def test_log_setup_returns_provided_queue_and_start_listener_creates_listener(tmp_path):
    """log_setup reuses a caller-supplied queue and can start a live listener."""
    provided_queue = mp.Queue()
    returned_queue = log_setup("logging.yml", tmp_path, queue=provided_queue)

    # The caller's queue is handed back unchanged.
    assert returned_queue is provided_queue

    # With start_listener=True a (queue, listener) pair is returned.
    log_queue, listener = log_setup("logging.yml", tmp_path, start_listener=True)
    try:
        assert log_queue is not None
        logger = logging.getLogger("model")
        logger.log(PERFORMANCE, json.dumps({"kind": "perf"}))
    finally:
        # Always stop the listener so no background thread outlives the test.
        listener.stop()

    # Setup is expected to have created at least one log directory under tmp_path.
    timestamp_dirs = [path for path in tmp_path.iterdir() if path.is_dir()]
    assert timestamp_dirs


def test_load_performance_log_returns_none_when_latest_log_file_is_missing(
    tmp_path, monkeypatch
):
    """A timestamp directory without a performance.log file yields None."""
    logs_dir = tmp_path / "logs"
    latest_dir = logs_dir / "2026-03-21-1200"
    # The directory exists but deliberately contains no performance.log.
    latest_dir.mkdir(parents=True)

    # Redirect the log root to the temp directory.
    monkeypatch.setattr(
        "navigate.log_files.log_functions.get_navigate_path", lambda: tmp_path
    )

    assert load_performance_log() is None
40 changes: 40 additions & 0 deletions test/model/data_sources/test_factory.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import logging
import sys
import types

import pytest

from navigate.model import data_sources


@pytest.mark.parametrize(
    ("file_type", "module_name", "class_name"),
    [
        ("TIFF", "tiff_data_source", "TiffDataSource"),
        ("OME-TIFF", "tiff_data_source", "TiffDataSource"),
        ("H5", "bdv_data_source", "BigDataViewerDataSource"),
        ("N5", "bdv_data_source", "BigDataViewerDataSource"),
        ("OME-Zarr", "zarr_data_source", "OMEZarrDataSource"),
    ],
)
def test_get_data_source_returns_expected_class(
    monkeypatch, file_type, module_name, class_name
):
    """get_data_source maps each file-type string to the matching class.

    A stub module is injected into sys.modules so the test does not import
    the real data-source implementations.
    """
    fake_module_name = f"{data_sources.__name__}.{module_name}"
    fake_module = types.ModuleType(fake_module_name)
    fake_class = type(class_name, (), {})

    setattr(fake_module, class_name, fake_class)
    # monkeypatch.setitem restores sys.modules automatically after the test.
    monkeypatch.setitem(sys.modules, fake_module_name, fake_module)

    assert data_sources.get_data_source(file_type) is fake_class


def test_get_data_source_logs_and_raises_for_unknown_type(caplog):
    """An unsupported file type is both logged at ERROR and raised."""
    expected_message = "Unknown file type CSV. Cannot open."

    with caplog.at_level(logging.ERROR, logger="model"):
        with pytest.raises(NotImplementedError, match=expected_message):
            data_sources.get_data_source("CSV")

    assert expected_message in caplog.text
68 changes: 68 additions & 0 deletions test/model/test_threads.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
from queue import Queue
from unittest.mock import MagicMock

import pytest

import navigate.model.utils.threads as threads_module
from navigate.model.utils.exceptions import UserVisibleException


def test_thread_with_warning_enqueues_user_visible_exception():
    """A UserVisibleException is logged, enqueued as a warning, and re-raised."""
    warning_queue = Queue()
    logger = MagicMock()

    def target():
        raise UserVisibleException("Camera disconnected")

    thread = threads_module.ThreadWithWarning(
        target=target,
        name="worker-visible",
        warning_queue=warning_queue,
        logger=logger,
    )

    # run() is called directly (not start()) so the exception propagates here.
    with pytest.raises(UserVisibleException, match="Camera disconnected"):
        thread.run()

    logger.error.assert_called_once_with(
        "Error in thread worker-visible: Camera disconnected"
    )
    # The user-visible message is forwarded to the warning queue.
    assert warning_queue.get_nowait() == ("warning", "Camera disconnected")


def test_thread_with_warning_does_not_enqueue_non_user_visible_exception():
    """Generic exceptions are logged and re-raised but never queued."""
    warning_queue = Queue()
    logger = MagicMock()

    def target():
        raise RuntimeError("boom")

    thread = threads_module.ThreadWithWarning(
        target=target,
        name="worker-generic",
        warning_queue=warning_queue,
        logger=logger,
    )

    # Calling run() directly lets the exception surface in this thread.
    with pytest.raises(RuntimeError, match="boom"):
        thread.run()

    logger.error.assert_called_once_with("Error in thread worker-generic: boom")
    # Only user-visible exceptions should reach the warning queue.
    assert warning_queue.empty()


def test_thread_with_warning_uses_module_logger_by_default(monkeypatch):
    """Without explicit logger/queue arguments the module-level logger is used."""
    logger = MagicMock()
    # Swap the module-level logger so default logging behavior can be observed.
    monkeypatch.setattr(threads_module, "logger", logger)
    calls = []

    thread = threads_module.ThreadWithWarning(
        target=lambda: calls.append("ran"),
        name="worker-success",
    )

    thread.run()

    # The target ran, no warning queue was attached, and nothing was logged.
    assert calls == ["ran"]
    assert not hasattr(thread, "_warning_queue")
    logger.error.assert_not_called()
51 changes: 51 additions & 0 deletions test/test_commit_additional.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import subprocess
from unittest.mock import MagicMock

from navigate._commit import get_git_revision_hash, get_version_from_file


def test_get_git_revision_hash_restores_working_directory_on_success(monkeypatch):
    """On success the hash is decoded and the original cwd is restored."""
    chdir = MagicMock()
    monkeypatch.setattr("navigate._commit.os.getcwd", lambda: "/original")
    monkeypatch.setattr("navigate._commit.os.chdir", chdir)
    # First call answers the is-inside-work-tree check; second returns the hash.
    monkeypatch.setattr(
        "navigate._commit.subprocess.check_output",
        MagicMock(side_effect=[b"true", b"abc123"]),
    )

    assert get_git_revision_hash() == "abc123"
    # The final chdir must return to the directory captured at entry.
    assert chdir.call_args_list[-1].args[0] == "/original"


def test_get_git_revision_hash_returns_none_when_git_is_unavailable(monkeypatch):
    """A missing git binary yields None while still restoring the original cwd."""
    chdir_mock = MagicMock()
    monkeypatch.setattr("navigate._commit.os.getcwd", lambda: "/original")
    monkeypatch.setattr("navigate._commit.os.chdir", chdir_mock)
    monkeypatch.setattr(
        "navigate._commit.subprocess.check_output",
        MagicMock(side_effect=FileNotFoundError),
    )

    result = get_git_revision_hash()

    assert result is None
    assert chdir_mock.call_args_list[-1].args[0] == "/original"


def test_get_git_revision_hash_returns_none_when_repo_check_is_false(monkeypatch):
    """An empty repo-check response short-circuits before any hash lookup."""
    check_output_mock = MagicMock(return_value=b"")
    monkeypatch.setattr(
        "navigate._commit.subprocess.check_output", check_output_mock
    )

    assert get_git_revision_hash() is None
    check_output_mock.assert_called_once_with(
        ["git", "rev-parse", "--is-inside-work-tree"], stderr=subprocess.DEVNULL
    )


def test_get_version_from_file_reads_custom_version_file(tmp_path, monkeypatch):
    """get_version_from_file reads the named version file and strips whitespace."""
    version_file = tmp_path / "CUSTOM_VERSION"
    version_file.write_text("9.9.9\n")

    # Make the module resolve its own path inside tmp_path so the version
    # file is looked up next to the (fake) module location.
    monkeypatch.setattr(
        "navigate._commit.os.path.abspath", lambda path: str(tmp_path / "module.py")
    )

    assert get_version_from_file("CUSTOM_VERSION") == "9.9.9"
Loading
Loading