diff --git a/.cursorrules b/.cursorrules new file mode 100644 index 0000000..4408619 --- /dev/null +++ b/.cursorrules @@ -0,0 +1,7 @@ +These are Lead developer instructions for AI Agents : +- avoid except:pass handles, better crash with a clear error message than hide the problems. +- Use testing-driven development. +- Fix library code, not tests +- Never perform git commits +- Keep line length under 110 characters +- Finish your changes by running `make qa` in the boulder conda env. diff --git a/boulder/app.py b/boulder/app.py index c930202..f119de8 100644 --- a/boulder/app.py +++ b/boulder/app.py @@ -1,3 +1,4 @@ +import logging import os import dash @@ -10,7 +11,6 @@ ) from .config import ( get_config_from_path_with_comments, - get_initial_config, get_initial_config_with_comments, ) from .layout import get_layout @@ -43,24 +43,20 @@ server = app.server # Expose the server for deployment # Load initial configuration with optional override via environment variable -try: - # Allow overriding the initial configuration via environment variable - # Use either BOULDER_CONFIG_PATH or BOULDER_CONFIG for convenience - env_config_path = os.environ.get("BOULDER_CONFIG_PATH") or os.environ.get( - "BOULDER_CONFIG" - ) - - if env_config_path and env_config_path.strip(): - cleaned = env_config_path.strip() - initial_config, original_yaml = get_config_from_path_with_comments(cleaned) - # When a specific file is provided, propagate its base name to the UI store - provided_filename = os.path.basename(cleaned) - else: - initial_config, original_yaml = get_initial_config_with_comments() -except Exception as e: - print(f"Warning: Could not load config with comments, using standard loader: {e}") - initial_config = get_initial_config() - original_yaml = "" +# Allow overriding the initial configuration via environment variable +# Use either BOULDER_CONFIG_PATH or BOULDER_CONFIG for convenience +env_config_path = os.environ.get("BOULDER_CONFIG_PATH") or os.environ.get( + "BOULDER_CONFIG" 
+) + +if env_config_path and env_config_path.strip(): + cleaned = env_config_path.strip() + initial_config, original_yaml = get_config_from_path_with_comments(cleaned) + # When a specific file is provided, propagate its base name to the UI store + provided_filename = os.path.basename(cleaned) +else: + initial_config, original_yaml = get_initial_config_with_comments() + # Set the layout app.layout = get_layout( @@ -74,6 +70,41 @@ callbacks.register_callbacks(app) -def run_server(debug: bool = False, host: str = "0.0.0.0", port: int = 8050) -> None: +def run_server( + debug: bool = False, host: str = "0.0.0.0", port: int = 8050, verbose: bool = False +) -> None: """Run the Dash server.""" + if verbose: + # Configure logging for verbose output + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + logger = logging.getLogger(__name__) + logger.info("Boulder server starting in verbose mode") + logger.info(f"Server configuration: host={host}, port={port}, debug={debug}") + + # Check for potential port conflicts and log them + import socket + + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.bind((host, port)) + logger.info(f"Port {port} is available for binding") + except OSError as e: + logger.warning( + f"Port {port} binding check failed: {e} " + f"(this is normal if CLI already handled port conflicts)" + ) + + # Log initial configuration details + env_config_path = os.environ.get("BOULDER_CONFIG_PATH") or os.environ.get( + "BOULDER_CONFIG" + ) + if env_config_path: + logger.info(f"Loading configuration from: {env_config_path}") + else: + logger.info("Using default configuration") + app.run(debug=debug, host=host, port=port) diff --git a/boulder/assets/dark_mode.css b/boulder/assets/dark_mode.css index e11b5f6..6a97c3a 100644 --- a/boulder/assets/dark_mode.css +++ b/boulder/assets/dark_mode.css @@ -435,3 +435,46 @@ pre { 50% { background-position: 0 
0, 0 0, 0 0; } 100% { background-position: 200px 0, -200px 0, 240px 0; } } + +/* Resizable graph container styles */ +#graph-container { + position: relative; + transition: all 0.3s ease; + border: 1px solid var(--border-color); + border-radius: 4px; + overflow: hidden; +} + +#graph-container:hover { + box-shadow: 0 4px 12px var(--shadow-hover); +} + +/* Custom resize handle styling */ +#resize-handle { + position: absolute; + bottom: 0; + left: 0; + right: 0; + height: 6px; + background: linear-gradient(90deg, transparent 0%, var(--border-color) 20%, var(--border-color) 80%, transparent 100%); + cursor: ns-resize; + opacity: 0; + transition: opacity 0.2s ease; + z-index: 10; +} + +#graph-container:hover #resize-handle { + opacity: 1; +} + +/* Visual feedback during resize */ +#graph-container.resizing { + box-shadow: 0 6px 16px var(--shadow-hover); +} + +/* Responsive adjustments for smaller screens */ +@media (max-width: 768px) { + #graph-container { + margin: 0 -15px; + } +} diff --git a/boulder/assets/graph_resize.js b/boulder/assets/graph_resize.js new file mode 100644 index 0000000..95b9770 --- /dev/null +++ b/boulder/assets/graph_resize.js @@ -0,0 +1,132 @@ +// Graph resize functionality +(function() { + 'use strict'; + + let isResizing = false; + let startY = 0; + let startHeight = 0; + let graphContainer = null; + let reactorGraph = null; + + function initializeResize() { + // Wait for the graph container to be available + const checkContainer = setInterval(() => { + graphContainer = document.getElementById('graph-container'); + reactorGraph = document.getElementById('reactor-graph'); + + if (graphContainer && reactorGraph) { + clearInterval(checkContainer); + setupResizeHandle(); + } + }, 100); + } + + function setupResizeHandle() { + // Create resize handle + const resizeHandle = document.createElement('div'); + resizeHandle.id = 'resize-handle'; + resizeHandle.style.cssText = ` + position: absolute; + bottom: 0; + left: 0; + right: 0; + height: 6px; + 
background: linear-gradient(90deg, transparent 0%, var(--border-color, #ccc) 20%, var(--border-color, #ccc) 80%, transparent 100%); + cursor: ns-resize; + opacity: 0; + transition: opacity 0.2s ease; + z-index: 10; + `; + + graphContainer.appendChild(resizeHandle); + + // Show handle on hover + graphContainer.addEventListener('mouseenter', () => { + resizeHandle.style.opacity = '1'; + }); + + graphContainer.addEventListener('mouseleave', () => { + if (!isResizing) { + resizeHandle.style.opacity = '0'; + } + }); + + // Handle mouse events + resizeHandle.addEventListener('mousedown', startResize); + document.addEventListener('mousemove', resize); + document.addEventListener('mouseup', stopResize); + + // Prevent text selection during resize + resizeHandle.addEventListener('selectstart', (e) => e.preventDefault()); + resizeHandle.addEventListener('dragstart', (e) => e.preventDefault()); + } + + function startResize(e) { + isResizing = true; + startY = e.clientY; + startHeight = parseInt(getComputedStyle(reactorGraph).height, 10); + + // Add visual feedback + document.body.style.cursor = 'ns-resize'; + document.body.style.userSelect = 'none'; + graphContainer.classList.add('resizing'); + + e.preventDefault(); + } + + function resize(e) { + if (!isResizing) return; + + const deltaY = e.clientY - startY; + const newHeight = Math.max(200, startHeight + deltaY); // Minimum 200px + + reactorGraph.style.height = newHeight + 'px'; + + e.preventDefault(); + } + + function stopResize() { + if (!isResizing) return; + + isResizing = false; + + // Remove visual feedback + document.body.style.cursor = ''; + document.body.style.userSelect = ''; + graphContainer.classList.remove('resizing'); + + // Hide resize handle if mouse is not over container + const resizeHandle = document.getElementById('resize-handle'); + if (resizeHandle && !graphContainer.matches(':hover')) { + resizeHandle.style.opacity = '0'; + } + } + + // Initialize when DOM is ready + if (document.readyState === 
'loading') { + document.addEventListener('DOMContentLoaded', initializeResize); + } else { + initializeResize(); + } + + // Re-initialize when Dash updates the DOM (for dynamic content) + const observer = new MutationObserver((mutations) => { + mutations.forEach((mutation) => { + if (mutation.type === 'childList') { + const graphContainer = document.getElementById('graph-container'); + const resizeHandle = document.getElementById('resize-handle'); + + if (graphContainer && !resizeHandle) { + // Container exists but no resize handle, set it up + setTimeout(setupResizeHandle, 100); + } + } + }); + }); + + observer.observe(document.body, { + childList: true, + subtree: true + }); + +})(); diff --git a/boulder/callbacks/__init__.py b/boulder/callbacks/__init__.py index e883ec3..6e13d09 100644 --- a/boulder/callbacks/__init__.py +++ b/boulder/callbacks/__init__.py @@ -7,6 +7,7 @@ modal_callbacks, notification_callbacks, properties_callbacks, + resize_callbacks, simulation_callbacks, theme_callbacks, ) @@ -18,6 +19,7 @@ def register_callbacks(app) -> None: # type: ignore modal_callbacks.register_callbacks(app) properties_callbacks.register_callbacks(app) config_callbacks.register_callbacks(app) + resize_callbacks.register_callbacks(app) simulation_callbacks.register_callbacks(app) notification_callbacks.register_callbacks(app) clientside_callbacks.register_callbacks(app) diff --git a/boulder/callbacks/config_callbacks.py b/boulder/callbacks/config_callbacks.py index 13048ba..100d7f7 100644 --- a/boulder/callbacks/config_callbacks.py +++ b/boulder/callbacks/config_callbacks.py @@ -6,6 +6,10 @@ import yaml from dash import Input, Output, State, dcc, html +from ..verbose_utils import get_verbose_logger, is_verbose_mode + +logger = get_verbose_logger(__name__) + # Configure YAML to preserve dict order without Python tags yaml.add_representer( dict, @@ -87,6 +91,7 @@ def render_config_upload_area(file_name: str) -> tuple: Output("current-config", "data"), 
Output("config-file-name", "data"), Output("original-yaml-with-comments", "data"), + Output("upload-config", "contents"), # Add this to reset upload contents ], [ Input("upload-config", "contents"), @@ -119,6 +124,7 @@ def handle_config_upload_delete( from ..config import ( load_yaml_string_with_comments, normalize_config, + validate_config, ) # Use comment-preserving YAML loader @@ -130,18 +136,27 @@ def handle_config_upload_delete( # Normalize from YAML with 🪨 STONE standard to internal format normalized = normalize_config(decoded) - return normalized, upload_filename, decoded_string + # Validate the configuration (this will also convert units) + normalized = validate_config(normalized) + if is_verbose_mode(): + logger.info( + f"Successfully loaded configuration file: {upload_filename}" + ) + return normalized, upload_filename, decoded_string, dash.no_update else: print( "Only YAML format with 🪨 STONE standard (.yaml/.yml) files are supported. Got:" f" {upload_filename}" ) - return dash.no_update, "", "" + return dash.no_update, "", "", dash.no_update except Exception as e: - print(f"Error processing uploaded file: {e}") - return dash.no_update, "", "" + if is_verbose_mode(): + logger.error(f"Error processing uploaded file: {e}", exc_info=True) + else: + print(f"Error processing uploaded file: {e}") + return dash.no_update, "", "", dash.no_update elif trigger == "delete-config-file" and delete_n_clicks: - return get_initial_config(), "", "" + return get_initial_config(), "", "", None # Reset upload contents to None else: raise dash.exceptions.PreventUpdate diff --git a/boulder/callbacks/modal_callbacks.py b/boulder/callbacks/modal_callbacks.py index edac762..6f8fa62 100644 --- a/boulder/callbacks/modal_callbacks.py +++ b/boulder/callbacks/modal_callbacks.py @@ -118,31 +118,23 @@ def open_config_yaml_modal( # If we have original YAML with comments, try to preserve them if original_yaml and original_yaml.strip(): - try: - # Load original YAML with comments - 
original_data = load_yaml_string_with_comments(original_yaml) - - # Check if the config has actually changed by comparing the original with new stone config - original_normalized = normalize_config(original_data) - if original_normalized == config: - # Config hasn't changed, use original YAML directly - yaml_str = original_yaml - else: - # Config has changed, update while preserving comments - updated_data = _update_yaml_preserving_comments( - original_data, stone_config - ) - yaml_str = yaml_to_string_with_comments(updated_data) - except Exception as e: - print(f"Warning: Could not preserve comments: {e}") - # Fallback to standard format - yaml_str = yaml_to_string_with_comments(stone_config) + # Load original YAML with comments + original_data = load_yaml_string_with_comments(original_yaml) + + # Check if the config has actually changed by comparing the original with new stone config + original_normalized = normalize_config(original_data) + if original_normalized == config: + # Config hasn't changed, use original YAML directly + yaml_str = original_yaml + else: + # Config has changed, update while preserving comments + updated_data = _update_yaml_preserving_comments( + original_data, stone_config + ) + yaml_str = yaml_to_string_with_comments(updated_data) else: # No original YAML, use standard format - try: - yaml_str = yaml_to_string_with_comments(stone_config) - except Exception: - yaml_str = yaml.dump(stone_config, sort_keys=False, indent=2) + yaml_str = yaml_to_string_with_comments(stone_config) textarea = dcc.Textarea( id="config-yaml-editor", @@ -180,7 +172,11 @@ def update_config_from_yaml(n_clicks: int, yaml_str: str) -> Tuple[dict, bool, s raise dash.exceptions.PreventUpdate try: - from ..config import load_yaml_string_with_comments, normalize_config + from ..config import ( + load_yaml_string_with_comments, + normalize_config, + validate_config, + ) # Try to use comment-preserving YAML loader first try: @@ -190,8 +186,9 @@ def 
update_config_from_yaml(n_clicks: int, yaml_str: str) -> Tuple[dict, bool, s new_config = yaml.safe_load(yaml_str) normalized_config = normalize_config(new_config) + validated_config = validate_config(normalized_config) # Update the original YAML store with the new YAML string to preserve comments for future edits - return normalized_config, False, yaml_str + return validated_config, False, yaml_str except yaml.YAMLError as e: print(f"YAML Error on save: {e}") # In a real app, you'd show an error to the user here diff --git a/boulder/callbacks/notification_callbacks.py b/boulder/callbacks/notification_callbacks.py index 6b19e00..b491005 100644 --- a/boulder/callbacks/notification_callbacks.py +++ b/boulder/callbacks/notification_callbacks.py @@ -5,6 +5,10 @@ import dash from dash import Input, Output, State +from ..verbose_utils import get_verbose_logger, is_verbose_mode + +logger = get_verbose_logger(__name__) + def register_callbacks(app) -> None: # type: ignore """Register notification-related callbacks.""" @@ -44,13 +48,30 @@ def notification_handler( # Config upload if trigger == "upload-config" and upload_contents: + if is_verbose_mode(): + logger.info(f"Processing uploaded config file: {upload_filename}") try: import yaml content_type, content_string = upload_contents.split(",") decoded_string = base64.b64decode(content_string).decode("utf-8") + + if is_verbose_mode(): + logger.info( + f"File content preview (first 200 chars): {decoded_string[:200]}..." + ) + # Validate as YAML (STONE standard) instead of JSON - yaml.safe_load(decoded_string) + parsed_yaml = yaml.safe_load(decoded_string) + + if is_verbose_mode(): + keys_info = ( + list(parsed_yaml.keys()) + if isinstance(parsed_yaml, dict) + else "Not a dict" + ) + logger.info(f"YAML parsed successfully. 
Keys: {keys_info}") + return ( True, f"✅ Configuration loaded from {upload_filename}", @@ -59,7 +80,13 @@ def notification_handler( ) except Exception as e: message = f"Could not parse file {upload_filename}. Error: {e}" - print(f"ERROR: {message}") + if is_verbose_mode(): + logger.error( + f"File upload failed for {upload_filename}: {message}", + exc_info=True, + ) + else: + print(f"ERROR: {message}") return ( True, message, diff --git a/boulder/callbacks/resize_callbacks.py b/boulder/callbacks/resize_callbacks.py new file mode 100644 index 0000000..580ae67 --- /dev/null +++ b/boulder/callbacks/resize_callbacks.py @@ -0,0 +1,32 @@ +"""Callbacks for resizable graph container.""" + +from typing import Any, Dict + +import dash +from dash import Input, Output, State + + +def register_callbacks(app) -> None: # type: ignore + """Register resize-related callbacks.""" + + @app.callback( + Output("reactor-graph", "style"), + Input("graph-container", "n_clicks"), + State("reactor-graph", "style"), + State("graph-container", "id"), + prevent_initial_call=True, + ) + def handle_resize_click( + n_clicks: int, current_style: Dict[str, Any], container_id: str + ) -> Dict[str, Any]: + """Handle resize interactions via JavaScript.""" + if n_clicks is None: + raise dash.exceptions.PreventUpdate + + # Preserve existing style properties + updated_style = current_style.copy() if current_style else {} + updated_style["width"] = "100%" + updated_style["minHeight"] = "200px" + updated_style["overflow"] = "hidden" + + return updated_style diff --git a/boulder/callbacks/simulation_callbacks.py b/boulder/callbacks/simulation_callbacks.py index fb6b4e6..3561452 100644 --- a/boulder/callbacks/simulation_callbacks.py +++ b/boulder/callbacks/simulation_callbacks.py @@ -9,6 +9,12 @@ import plotly.graph_objects as go # type: ignore from dash import Input, Output, State +from ..verbose_utils import get_verbose_logger, is_verbose_mode + +logger = get_verbose_logger(__name__) + 
+REPORT_FRACTION_THRESHOLD = 1e-7  # 0.1 ppm cutoff for thermo report + def register_callbacks(app) -> None:  # type: ignore """Register simulation-related callbacks.""" @@ -89,7 +95,15 @@ def run_simulation( custom_mechanism: str, uploaded_filename: str, ) -> Tuple[ - Any, Any, Any, str, Any, Dict[str, str], Dict[str, str], Dict[str, Any], bool + Any, + Any, + Any, + str, + Any, + Dict[str, str], + Dict[str, str], + Dict[str, Any], + bool, ]: from ..cantera_converter import ( CanteraConverter, @@ -99,6 +113,14 @@ from ..config import USE_DUAL_CONVERTER from ..utils import apply_theme_to_figure + if is_verbose_mode(): + logger.info( + f"Starting simulation with config: {config_filename or 'default'}" + ) + logger.info( + f"Mechanism: {mechanism_select}, Custom mechanism: {bool(custom_mechanism)}" + ) + if not n_clicks or not config: return ( go.Figure(), @@ -160,7 +182,7 @@ temp_fig.update_layout( title=f"Temperature vs Time — {first_id}", xaxis_title="Time (s)", - yaxis_title="Temperature (K)", + yaxis_title="Temperature (°C)", ) temp_fig = apply_theme_to_figure(temp_fig, theme) @@ -193,24 +215,15 @@ try: for reactor_id, reactor in reactors_dict.items(): try: - reactor_report = reactor.report() - except Exception: - reactor_report = "" - - try: - thermo_report = reactor.thermo.report() + thermo_report = reactor.thermo.report( + threshold=REPORT_FRACTION_THRESHOLD + ) except Exception: - # Canterasupports calling the object directly - try: - thermo_callable = getattr(reactor.thermo, "__call__", None) - thermo_report = ( - thermo_callable() if callable(thermo_callable) else "" - ) - except Exception: - thermo_report = "" + # Fall back to an empty report if Cantera's thermo.report() fails + thermo_report = "" reactor_reports[reactor_id] = { - "reactor_report": reactor_report, + "reactor_report": str(reactor), "thermo_report": thermo_report, } except Exception: @@ -238,7 +251,10 @@ except Exception as e: message 
= f"Error during simulation: {str(e)}" - print(f"ERROR: {message}") + if is_verbose_mode(): + logger.error(f"Simulation failed: {message}", exc_info=True) + else: + print(f"ERROR: {message}") # IMPORTANT: update simulation-data with a non-empty payload so the # overlay-clearing callback (listening to simulation-data) fires. return ( @@ -248,7 +264,7 @@ def run_simulation( "", message, {"display": "block", "color": "red"}, - {"display": "none"}, + {"display": "block"}, {"error": message}, False, ) @@ -512,7 +528,7 @@ def update_plots_for_selected_node( temp_fig.update_layout( title=f"Temperature vs Time — {node_id}", xaxis_title="Time (s)", - yaxis_title="Temperature (K)", + yaxis_title="Temperature (°C)", ) temp_fig = apply_theme_to_figure(temp_fig, theme) diff --git a/boulder/cantera_converter.py b/boulder/cantera_converter.py index fb4c03a..5e841aa 100644 --- a/boulder/cantera_converter.py +++ b/boulder/cantera_converter.py @@ -1,6 +1,5 @@ import importlib import json -import logging import math import os from dataclasses import dataclass, field @@ -11,8 +10,9 @@ from .config import CANTERA_MECHANISM from .sankey import generate_sankey_input_from_sim +from .verbose_utils import get_verbose_logger, is_verbose_mode -logger = logging.getLogger(__name__) +logger = get_verbose_logger(__name__) # Custom builder/hook types @@ -47,8 +47,12 @@ def get_plugins() -> BoulderPlugins: """ global _PLUGIN_CACHE if _PLUGIN_CACHE is not None: + if is_verbose_mode(): + logger.info("Using cached plugins") return _PLUGIN_CACHE + if is_verbose_mode(): + logger.info("Discovering Boulder plugins...") plugins = BoulderPlugins() # Discover from entry points @@ -87,6 +91,14 @@ def get_plugins() -> BoulderPlugins: ) _PLUGIN_CACHE = plugins + + if is_verbose_mode(): + logger.info( + f"Plugin discovery complete: {len(plugins.reactor_builders)} reactor builders, " + f"{len(plugins.connection_builders)} connection builders, " + f"{len(plugins.post_build_hooks)} post-build hooks" + ) + return 
plugins @@ -194,10 +206,12 @@ def create_connection(self, conn_config: Dict[str, Any]): valve.valve_coeff = float(props.get("valve_coeff", 1.0)) elif conn_type == "Wall": # Handle walls as energy connections (e.g., torch power or losses) + # After validation, electric_power_kW is converted to kilowatts if it had units electric_power_kW = float(props.get("electric_power_kW", 0.0)) torch_eff = float(props.get("torch_eff", 1.0)) gen_eff = float(props.get("gen_eff", 1.0)) # Net heat rate into the target from the source (W) + # Convert from kW to W Q_watts = electric_power_kW * 1e3 * torch_eff * gen_eff wall = ct.Wall(source, target, A=1.0, Q=Q_watts, name=conn_config["id"]) self.walls[conn_config["id"]] = wall @@ -481,9 +495,11 @@ def build_network_and_code( self.connections[cid].valve_coeff = coeff elif typ == "Wall": # Handle walls as energy connections (e.g., torch power or losses) + # After validation, electric_power_kW is converted to kilowatts if it had units electric_power_kW = float(props.get("electric_power_kW", 0.0)) torch_eff = float(props.get("torch_eff", 1.0)) gen_eff = float(props.get("gen_eff", 1.0)) + # Convert from kW to W Q_watts = electric_power_kW * 1e3 * torch_eff * gen_eff self.code_lines.append( f"{cid} = ct.Wall({src}, {tgt}, A=1.0, Q={Q_watts}, name='{cid}')" diff --git a/boulder/cli.py b/boulder/cli.py index 007575a..f40521c 100644 --- a/boulder/cli.py +++ b/boulder/cli.py @@ -9,10 +9,31 @@ import argparse import os +import socket import sys import webbrowser +def is_port_in_use(host: str, port: int) -> bool: + """Check if a port is already in use.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + try: + sock.bind((host, port)) + return False + except OSError: + return True + + +def find_available_port(host: str, start_port: int, max_attempts: int = 10) -> int: + """Find the next available port starting from start_port.""" + for port in range(start_port, start_port + max_attempts): + if not is_port_in_use(host, port): + 
return port + raise RuntimeError( + f"Could not find an available port in range {start_port}-{start_port + max_attempts - 1}" + ) + + def parse_args(argv: list[str] | None = None) -> argparse.Namespace: parser = argparse.ArgumentParser( prog="boulder", @@ -47,6 +68,16 @@ def parse_args(argv: list[str] | None = None) -> argparse.Namespace: action="store_true", help="Do not open the browser automatically", ) + parser.add_argument( + "--verbose", + action="store_true", + help="Enable verbose output in server console", + ) + parser.add_argument( + "--no-port-search", + action="store_true", + help="Do not search for alternative ports if the specified port is in use", + ) return parser.parse_args(argv) @@ -57,6 +88,38 @@ def main(argv: list[str] | None = None) -> None: if args.config: os.environ["BOULDER_CONFIG_PATH"] = args.config + # Set verbose mode via environment variable for app initialization + if args.verbose: + os.environ["BOULDER_VERBOSE"] = "1" + + # Check if the requested port is available, find alternative if not + original_port = args.port + if is_port_in_use(args.host, args.port): + if args.no_port_search: + print(f"Error: Port {args.port} is already in use.") + print( + f"Please specify a different port using --port or stop the service using port {args.port}" + ) + print( + "Alternatively, remove --no-port-search to automatically find an available port." + ) + sys.exit(1) + else: + try: + available_port = find_available_port(args.host, args.port) + print( + f"Port {args.port} is already in use. Using port {available_port} instead." + ) + args.port = available_port + except RuntimeError as e: + print(f"Error: {e}") + print( + f"Please specify a different port using --port or stop the service using port {args.port}" + ) + sys.exit(1) + elif args.verbose: + print(f"Port {args.port} is available.") + # Import cantera_converter early to ensure plugins are loaded at app startup from . 
import cantera_converter # noqa: F401 @@ -71,7 +134,12 @@ def main(argv: list[str] | None = None) -> None: except Exception: pass - run_server(debug=args.debug, host=args.host, port=args.port) + if args.verbose and args.port != original_port: + print(f"Boulder server will start on {url} (port changed from {original_port})") + elif args.verbose: + print(f"Boulder server will start on {url}") + + run_server(debug=args.debug, host=args.host, port=args.port, verbose=args.verbose) if __name__ == "__main__": diff --git a/boulder/config.py b/boulder/config.py index 2c31739..62fe35b 100644 --- a/boulder/config.py +++ b/boulder/config.py @@ -171,6 +171,19 @@ def normalize_config(config: Dict[str, Any]) -> Dict[str, Any]: return normalized +def validate_config(config: Dict[str, Any]) -> Dict[str, Any]: + """Validate a normalized config using Pydantic schema and return a plain dict. + + This performs structural validation and cross-references (IDs, source/target) without + building any network. + """ + # Import locally to avoid import costs when not needed + from .validation import validate_normalized_config + + model = validate_normalized_config(config) + return model.dict() + + def get_initial_config() -> Dict[str, Any]: """Load the initial configuration in YAML format with 🪨 STONE standard. 
@@ -182,7 +195,8 @@ def get_initial_config() -> Dict[str, Any]: if os.path.exists(stone_config_path): config = load_config_file(stone_config_path) - return normalize_config(config) + normalized = normalize_config(config) + return validate_config(normalized) else: raise FileNotFoundError( f"YAML configuration file with 🪨 STONE standard not found: {stone_config_path}" @@ -202,19 +216,19 @@ def get_initial_config_with_comments() -> tuple[Dict[str, Any], str]: if os.path.exists(stone_config_path): # Load with comments preserved - try: - config_with_comments = load_config_file_with_comments(stone_config_path) - # Also read the raw file content to preserve original formatting - with open(stone_config_path, "r", encoding="utf-8") as f: - original_yaml = f.read() + config_with_comments = load_config_file_with_comments( + stone_config_path + ) # load YAML + # Also read the raw file content to preserve original formatting + with open(stone_config_path, "r", encoding="utf-8") as f: + original_yaml = f.read() + + normalized_config = normalize_config( + config_with_comments + ) # convert to STONE format + validated = validate_config(normalized_config) # validate inputs + return validated, original_yaml - normalized_config = normalize_config(config_with_comments) - return normalized_config, original_yaml - except Exception as e: - print(f"Warning: Could not load with comments preserved: {e}") - # Fallback to standard loading - config = load_config_file(stone_config_path) - return normalize_config(config), "" else: raise FileNotFoundError( f"YAML configuration file with 🪨 STONE standard not found: {stone_config_path}" @@ -226,8 +240,9 @@ def get_config_from_path(config_path: str) -> Dict[str, Any]: if not os.path.exists(config_path): raise FileNotFoundError(f"Configuration file not found: {config_path}") - config = load_config_file(config_path) - return normalize_config(config) + config = load_config_file(config_path) # load YAML + normalized = normalize_config(config) # convert 
to STONE format + return validate_config(normalized) # validate inputs def get_config_from_path_with_comments(config_path: str) -> tuple[Dict[str, Any], str]: @@ -241,15 +256,13 @@ def get_config_from_path_with_comments(config_path: str) -> tuple[Dict[str, Any] if not os.path.exists(config_path): raise FileNotFoundError(f"Configuration file not found: {config_path}") - try: - config_with_comments = load_config_file_with_comments(config_path) - with open(config_path, "r", encoding="utf-8") as f: - original_yaml = f.read() - return normalize_config(config_with_comments), original_yaml - except Exception: - # Fallback to standard loader - config = load_config_file(config_path) - return normalize_config(config), "" + config_with_comments = load_config_file_with_comments(config_path) # load YAML + with open(config_path, "r", encoding="utf-8") as f: + original_yaml = f.read() + normalized = normalize_config( + config_with_comments + ) # convert to STONE format with comments + return validate_config(normalized), original_yaml # validate inputs def convert_to_stone_format(config: dict) -> dict: diff --git a/boulder/layout.py b/boulder/layout.py index 57fb865..8f8f30e 100644 --- a/boulder/layout.py +++ b/boulder/layout.py @@ -171,7 +171,7 @@ def get_layout( ), dbc.Row( [ - dbc.Label("Initial Temperature (K)", width=4), + dbc.Label("Initial Temperature (°C)", width=4), dbc.Col( dbc.Input( id="reactor-temp", @@ -434,11 +434,6 @@ def get_layout( className="mb-2 w-100", # Triggered by Ctrl + Enter see clientside_callback ), - html.Div( - id="simulation-error-display", - className="mb-2", - style={"display": "none"}, - ), html.Div( id="download-python-code-btn-container", children=[], @@ -465,36 +460,50 @@ def get_layout( [ dbc.CardHeader("Reactor Network"), dbc.CardBody( - cyto.Cytoscape( - id="reactor-graph", - # Not all Cytoscape layouts are supported by Dash. 
- # see : https://dash.plotly.com/cytoscape/layout - layout={ - # Use dagre for a predominantly horizontal, left-to-right - # layout - # Requires dagre + cytoscape-dagre scripts, added in app - # initialization - "name": "dagre", - "rankDir": "LR", # Left-to-right - "rankSep": 120, # Horizontal spacing between ranks - "nodeSep": 60, # Minimum spacing between nodes on same rank - "edgeSep": 30, # Separation between edges - "fit": True, - "padding": 20, - }, - style={"width": "100%", "height": "360px"}, - elements=config_to_cyto_elements( - initial_config + # Resizable graph container + html.Div( + cyto.Cytoscape( + id="reactor-graph", + # Not all Cytoscape layouts are supported by Dash. + # see : https://dash.plotly.com/cytoscape/layout + layout={ + # Use dagre for a predominantly horizontal, left-to-right + # layout + # Requires dagre + cytoscape-dagre scripts, added in app + # initialization + "name": "dagre", + "rankDir": "LR", # Left-to-right + "rankSep": 120, # Horizontal spacing between ranks + "nodeSep": 60, # Minimum spacing between same-rank nodes + "edgeSep": 30, # Separation between edges + "fit": True, + "padding": 20, + }, + style={ + "width": "100%", + "height": "360px", + "minHeight": "200px", + }, + elements=config_to_cyto_elements( + initial_config + ), + minZoom=0.5, + maxZoom=2, + stylesheet=cyto_stylesheet, + responsive=True, + # Use only supported properties: + userPanningEnabled=True, + userZoomingEnabled=True, + boxSelectionEnabled=True, ), - minZoom=0.5, - maxZoom=2, - stylesheet=cyto_stylesheet, - responsive=True, - # Use only supported properties: - userPanningEnabled=True, - userZoomingEnabled=True, - boxSelectionEnabled=True, - ) + id="graph-container", + style={ + "position": "relative", + "border": "1px solid #ddd", + "borderRadius": "4px", + "overflow": "hidden", + }, + ), ), ], className="mb-3", @@ -504,6 +513,11 @@ def get_layout( dbc.CardHeader("Simulation Results"), dbc.CardBody( children=[ + html.Div( + 
id="simulation-error-display", + className="mb-2", + style={"display": "none"}, + ), dbc.Tabs( [ dbc.Tab( @@ -590,7 +604,7 @@ def get_layout( ], id="results-tabs", active_tab="plots-tab", - ) + ), ] ), ], diff --git a/boulder/utils.py b/boulder/utils.py index 9b4c77b..f14575d 100644 --- a/boulder/utils.py +++ b/boulder/utils.py @@ -192,7 +192,7 @@ def label_with_unit(key: str) -> str: unit_map = { "pressure": "pressure (Pa)", "composition": "composition (%mol)", - "temperature": "temperature (K)", + "temperature": "temperature (°C)", "mass_flow_rate": "mass flow rate (kg/s)", "volume": "volume (m³)", "valve_coeff": "valve coefficient (-)", diff --git a/boulder/validation.py b/boulder/validation.py new file mode 100644 index 0000000..3d1f0eb --- /dev/null +++ b/boulder/validation.py @@ -0,0 +1,357 @@ +"""Schema and validation utilities for normalized configuration. + +This module defines Pydantic models for Boulder internal (normalized) config and exposes a +`validate_normalized_config` function that validates dictionaries after `normalize_config`. + +Validation is schema-only and does not build or inspect any simulation network. 
+""" + +from __future__ import annotations + +from typing import Any, Dict, List, Optional, Set + +from pint import UnitRegistry +from pydantic import BaseModel, Field, validator + +# Unit suggestions for error messages +UNIT_SUGGESTIONS = { + "celsius": "temperature units like 'degC', 'degF', 'K'", + "kelvin": "temperature units like 'degC', 'degF', 'K'", + "pascal": "pressure units like 'atm', 'bar', 'Pa', 'psi'", + "kilogram": "mass units like 'kg', 'g', 'lb'", + "meter**3": "volume units like 'm3', 'L', 'mL', 'ft3'", + "kilogram/second": "flow rate units like 'kg/s', 'g/min', 'lb/hr'", + "second": "time units like 's', 'ms', 'min', 'hr'", + "watt": "power units like 'W', 'kW', 'MW', 'hp'", + "kilowatt": "power units like 'kW', 'W', 'MW', 'hp'", + "joule": "energy units like 'J', 'kJ', 'cal', 'BTU'", + "meter": "length units like 'm', 'cm', 'ft', 'in'", +} + + +class SimulationModel(BaseModel): + """Simulation section of the normalized config. + + Open schema by design: accept arbitrary keys (e.g., mechanisms, time settings). 
+ """ + + class Config: + extra = "allow" + + +class NodeModel(BaseModel): + """Node entry in `nodes` list of normalized config.""" + + id: str = Field(min_length=1) + type: str = Field(min_length=1) + properties: Dict[str, Any] = Field(default_factory=dict) + metadata: Optional[Dict[str, Any]] = None + + @validator("properties") + def ensure_properties_is_object(cls, value: Dict[str, Any]) -> Dict[str, Any]: + if not isinstance(value, dict): + raise TypeError("node.properties must be a mapping/dict") + return value + + +class ConnectionModel(BaseModel): + """Connection entry in `connections` list of normalized config.""" + + id: str = Field(min_length=1) + type: str = Field(min_length=1) + source: str = Field(min_length=1) + target: str = Field(min_length=1) + properties: Dict[str, Any] = Field(default_factory=dict) + metadata: Optional[Dict[str, Any]] = None + + @validator("properties") + def ensure_properties_is_object(cls, value: Dict[str, Any]) -> Dict[str, Any]: + if not isinstance(value, dict): + raise TypeError("connection.properties must be a mapping/dict") + return value + + +class NormalizedConfigModel(BaseModel): + """Top-level normalized configuration model.""" + + metadata: Optional[Dict[str, Any]] = None + simulation: Optional[SimulationModel] = None + nodes: List[NodeModel] + connections: List[ConnectionModel] = Field(default_factory=list) + + def __init__(self, **data): + super().__init__(**data) + self._validate_references_and_uniqueness() + self._coerce_units() + + def _validate_references_and_uniqueness(self) -> None: + node_ids: List[str] = [n.id for n in self.nodes] + + # Unique node IDs + seen_nodes: Set[str] = set() + for nid in node_ids: + if nid in seen_nodes: + raise ValueError(f"Duplicate node id detected: '{nid}'") + seen_nodes.add(nid) + + # Unique connection IDs + seen_conns: Set[str] = set() + for conn in self.connections: + if conn.id in seen_conns: + raise ValueError(f"Duplicate connection id detected: '{conn.id}'") + 
seen_conns.add(conn.id) + + # Source/target references must exist + valid_nodes: Set[str] = set(node_ids) + for conn in self.connections: + if conn.source not in valid_nodes: + raise ValueError( + f"Connection '{conn.id}' source '{conn.source}' does not reference an existing node" + ) + if conn.target not in valid_nodes: + raise ValueError( + f"Connection '{conn.id}' target '{conn.target}' does not reference an existing node" + ) + + def _coerce_units(self) -> None: + """Coerce unit-bearing strings to canonical units using pint. + + Mirrors ctwrap behavior: values defined as strings with units are converted + to base magnitudes in consistent units. Unknown keys remain unchanged. + + This uses dynamic unit detection based on property names and pint's capabilities. + """ + unit_registry = UnitRegistry() + + # Define preferred target units for common physical quantities + # This maps dimensionalities to preferred units, not property names to units + preferred_units = { + "[temperature]": "celsius", + "[mass] / [length] / [time] ** 2": "pascal", # pressure + "[mass]": "kilogram", + "[length] ** 3": "meter**3", # volume + "[mass] / [time]": "kilogram/second", # mass flow rate + "[time]": "second", + "[mass] * [length] ** 2 / [time] ** 3": "watt", # power + "[mass] * [length] ** 2 / [time] ** 2": "joule", # energy + "[length]": "meter", + } + + # Special cases for property names that need specific handling + special_property_mappings = { + "electric_power_kW": "kilowatt", # Keep in kW for backward compatibility + } + + def _get_target_unit_for_property( + property_name: str, val: str + ) -> Optional[str]: + """Determine the target unit for a property based on its value and name.""" + # Check special mappings first + if property_name in special_property_mappings: + return special_property_mappings[property_name] + + # Property name-based hints for common properties + property_hints = { + "temperature": "celsius", + "pressure": "pascal", + "mass": "kilogram", + "volume": 
"meter**3", + "time": "second", + "dt": "second", + "end_time": "second", + "max_time": "second", + } + + # Check if property name suggests a unit type + if property_name in property_hints: + return property_hints[property_name] + + # Special handling for temperature strings (they fail in pint due to offset units) + import re + + if re.search(r"\b(degc|celsius|degf|fahrenheit|k|kelvin)\b", val.lower()): + return "celsius" # Changed to match the preferred unit + + try: + # Parse the value to determine its dimensionality + qty = unit_registry.Quantity(val) + dimensionality_str = str(qty.dimensionality) + + # Look up preferred unit for this dimensionality + return preferred_units.get(dimensionality_str) + except Exception: + return None + + def _coerce_value(val: Any, property_name: str = "") -> Any: + if isinstance(val, str): + try: + # First, determine what unit we should convert to + target_unit = _get_target_unit_for_property(property_name, val) + if not target_unit: + # If we can't determine a target unit, return as-is + return val + + # Special handling for temperature conversion (offset units) + if target_unit in ["kelvin", "celsius"]: + import re + + match = re.match( + r"([+-]?\d*\.?\d+)\s*([a-zA-Z°]+(?:[ -]?[a-zA-Z]+)*)", + val.strip(), + ) + if match: + value, temp_unit = match.groups() + try: + value = float(value) + except ValueError: + raise ValueError( + f"Could not convert '{value}' to a float for property '{property_name}'. " + "Please ensure the value is a valid number followed by a " + "temperature unit, e.g. '25 degC', '77 degF', or '298 K'." 
+ ) + temp_unit = temp_unit.lower() + + # Convert to target temperature unit + if target_unit == "kelvin": + # Convert to Kelvin + if temp_unit in ["degc", "celsius", "c"]: + return value + 273.15 + elif temp_unit in ["degf", "fahrenheit", "f"]: + return (value - 32) * 5 / 9 + 273.15 + elif temp_unit in ["k", "kelvin"]: + return value + elif target_unit == "celsius": + # Convert to Celsius + if temp_unit in ["degc", "celsius", "c"]: + return value + elif temp_unit in ["degf", "fahrenheit", "f"]: + return (value - 32) * 5 / 9 + elif temp_unit in ["k", "kelvin"]: + return value - 273.15 + + # Use pint for all other conversions + qty = unit_registry.Quantity(val) + return qty.to(target_unit).magnitude + + except Exception as e: + # Provide helpful error message with suggested units + # First try to get suggestions based on target unit + if target_unit: + suggestion = UNIT_SUGGESTIONS.get( + target_unit, f"units compatible with {target_unit}" + ) + else: + # Try to get suggestions based on dimensionality + try: + qty = unit_registry.Quantity(val) + dimensionality = str(qty.dimensionality) + + suggestions_by_dim = { + "[temperature]": "temperature units like 'degC', 'degF', 'K'", + "[mass] / [length] / [time] ** 2": ( + "pressure units like 'atm', 'bar', 'Pa', 'psi'" + ), + "[mass]": "mass units like 'kg', 'g', 'lb'", + "[length] ** 3": "volume units like 'm3', 'L', 'mL', 'ft3'", + "[mass] / [time]": "flow rate units like 'kg/s', 'g/min', 'lb/hr'", + "[time]": "time units like 's', 'ms', 'min', 'hr'", + "[mass] * [length] ** 2 / [time] ** 3": ( + "power units like 'kW', 'W', 'MW', 'hp'" + ), + "[mass] * [length] ** 2 / [time] ** 2": ( + "energy units like 'J', 'kJ', 'cal', 'BTU'" + ), + "[length]": "length units like 'm', 'cm', 'ft', 'in'", + } + suggestion = suggestions_by_dim.get( + dimensionality, + f"units with dimensionality {dimensionality}", + ) + except Exception: + suggestion = "valid units" + + prop_info = ( + f" for property '{property_name}'" if 
property_name else "" + ) + raise ValueError( + f"Could not convert '{val}'{prop_info}. " + f"Please use {suggestion}. " + f"Original error: {str(e)}" + ) + return val + + # Process all properties in nodes dynamically + for node in self.nodes: + for key, value in node.properties.items(): + node.properties[key] = _coerce_value(value, key) + + # Process all properties in connections dynamically + for conn in self.connections: + for key, value in conn.properties.items(): + conn.properties[key] = _coerce_value(value, key) + + # Process simulation properties dynamically + if isinstance(self.simulation, SimulationModel): + # For simulation, we need to handle it differently since it's a Pydantic model + # with extra fields allowed. In Pydantic v1, extra fields are stored in __fields_set__ + # and accessible via dict() method or direct attribute access. + + # Get all the simulation data as a dict + sim_data = ( + self.simulation.dict() + if hasattr(self.simulation, "dict") + else self.simulation.__dict__ + ) + + # Process each field and mirror updates into __dict__ for compatibility + coerced_updates: Dict[str, Any] = {} + for key, value in sim_data.items(): + if isinstance(value, str): + coerced = _coerce_value(value, key) + try: + setattr(self.simulation, key, coerced) + # Ensure __dict__ contains coerced values for downstream consumers + coerced_updates[key] = coerced + except Exception as e: + import logging + + logging.warning( + f"Failed to set attribute '{key}' on simulation: {e}" + ) + + # In pydantic v2, extras may live outside __dict__. Update __dict__ for consumers + # that expect direct dict access (e.g., tests using model.simulation.__dict__). + if coerced_updates: + try: + self.simulation.__dict__.update(coerced_updates) + except Exception: + # Best-effort: if __dict__ is not writable, ignore silently + # (attributes have already been set above). 
+ pass + + +def validate_normalized_config(config: Dict[str, Any]) -> NormalizedConfigModel: + """Validate a normalized config dict using Pydantic models. + + Parameters + ---------- + config + Configuration in the internal normalized format returned by `normalize_config`. + + Returns + ------- + NormalizedConfigModel + The validated, typed model. Use `.model_dump()` to get a plain dict back. + + Raises + ------ + pydantic.ValidationError + If the configuration does not match the schema or invariants. + ValueError + If cross-reference checks fail (e.g., unknown node references). + """ + # Accept only dict-like input here; normalization should already have ensured structure. + if not isinstance(config, dict): + raise TypeError("validate_normalized_config expects a mapping/dict") + + return NormalizedConfigModel(**config) diff --git a/boulder/verbose_utils.py b/boulder/verbose_utils.py new file mode 100644 index 0000000..cb0c3f5 --- /dev/null +++ b/boulder/verbose_utils.py @@ -0,0 +1,55 @@ +"""Utilities for verbose logging in Boulder.""" + +import logging +import os +from functools import wraps +from typing import Any, Callable + + +def is_verbose_mode() -> bool: + """Check if verbose mode is enabled via environment variable.""" + return os.environ.get("BOULDER_VERBOSE", "").strip() == "1" + + +def get_verbose_logger(name: str) -> logging.Logger: + """Get a logger configured for verbose output if verbose mode is enabled.""" + logger = logging.getLogger(name) + + if is_verbose_mode() and not logger.handlers: + # Only configure if verbose mode is on and logger isn't already configured + handler = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + + return logger + + +def verbose_print(*args, **kwargs) -> None: + """Print only if verbose mode is enabled.""" + if 
is_verbose_mode(): + print(*args, **kwargs) + + +def log_function_call(logger: logging.Logger) -> Callable: + """Log function calls in verbose mode.""" + + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(*args, **kwargs) -> Any: + if is_verbose_mode(): + logger.info( + f"Calling {func.__name__} with args={len(args)}, kwargs={list(kwargs.keys())}" + ) + result = func(*args, **kwargs) + if is_verbose_mode(): + logger.info(f"Completed {func.__name__}") + return result + + return wrapper + + return decorator diff --git a/ci/combined-environment-ci.yml b/ci/combined-environment-ci.yml index a8ad529..f962df7 100644 --- a/ci/combined-environment-ci.yml +++ b/ci/combined-environment-ci.yml @@ -5,6 +5,7 @@ channels: dependencies: - cantera - dash +- dash-bootstrap-components - dash-core-components - dash-extensions - dash-html-components @@ -14,9 +15,11 @@ dependencies: - myst-parser - numpy - pandas +- pint - pip - plotly - pre-commit +- pydantic>=2.0.0 - pydata-sphinx-theme - pytest - pytest-cov @@ -30,4 +33,5 @@ dependencies: - pytest-html>=3.0.0 - pytest-mock>=3.0.0 - pytest-xvfb + - types-PyYAML>=6.0.0 name: boulder diff --git a/ci/environment-ci.yml b/ci/environment-ci.yml index 85939f5..30875cb 100644 --- a/ci/environment-ci.yml +++ b/ci/environment-ci.yml @@ -24,3 +24,5 @@ dependencies: - pytest-xvfb - pytest-mock>=3.0.0 - types-PyYAML>=6.0.0 + - pint + - pydantic>=2.0.0 diff --git a/configs/default.yaml b/configs/default.yaml index 4a3325f..a62555d 100644 --- a/configs/default.yaml +++ b/configs/default.yaml @@ -20,7 +20,6 @@ nodes: temperature: 1000 # K pressure: 101325 # Pa composition: "CH4:1,O2:2,N2:7.52" - - id: res1 Reservoir: temperature: 300 # K diff --git a/configs/grouped_nodes.yaml b/configs/grouped_nodes.yaml index 6d4451d..e694ca6 100644 --- a/configs/grouped_nodes.yaml +++ b/configs/grouped_nodes.yaml @@ -12,21 +12,18 @@ nodes: Reservoir: temperature: 300 # K composition: "CH4:1,O2:2,N2:7.52" - - id: r1 IdealGasReactor: 
temperature: 1000 # K pressure: 101325 # Pa composition: "CH4:1,O2:2,N2:7.52" group: "Train A" # group membership - - id: r2 IdealGasReactor: temperature: 950 # K pressure: 101325 # Pa composition: "N2:1" group: "Train A" # same group; will appear inside one compound node - - id: res_out Reservoir: temperature: 300 # K @@ -38,13 +35,11 @@ connections: mass_flow_rate: 0.05 # kg/s source: res_in target: r1 - - id: mfc_mid MassFlowController: mass_flow_rate: 0.05 # kg/s source: r1 target: r2 - - id: mfc_out MassFlowController: mass_flow_rate: 0.05 # kg/s diff --git a/configs/mix_react_streams.yaml b/configs/mix_react_streams.yaml index 4fa1588..047191e 100644 --- a/configs/mix_react_streams.yaml +++ b/configs/mix_react_streams.yaml @@ -4,23 +4,24 @@ metadata: version: "3.0" author: "Boulder Configuration System" -simulation: - mechanism: "gri30.yaml" +phases: + gas: + mechanism: gri30.yaml +settings: time_step: 0.0005 # s max_time: 2.0 # s solver: "CVODE_BDF" - relative_tolerance: 1.0e-8 + relative_tolerance: 1.0e-08 absolute_tolerance: 1.0e-12 max_steps: 20000 -components: +nodes: - id: reactor1 IdealGasReactor: temperature: 1100 # K pressure: 101325 # Pa composition: "CH4:0.8,O2:1.6,N2:6.0" volume: 0.005 # m³ - - id: reactor2 IdealGasReactor: temperature: 900 # K @@ -32,13 +33,11 @@ components: Reservoir: temperature: 300 # K composition: "CH4:1,N2:2" - - id: res2 Reservoir: temperature: 320 # K pressure: 151987 # Pa composition: "O2:1,N2:3.76" - - id: mixer1 IdealGasReactor: temperature: 400 # K @@ -52,19 +51,16 @@ connections: mass_flow_rate: 0.03 # kg/s source: res1 target: reactor1 - - id: mfc2 MassFlowController: mass_flow_rate: 0.04 # kg/s source: res2 target: reactor1 - - id: mfc3 MassFlowController: mass_flow_rate: 0.025 # kg/s source: reactor1 target: mixer1 - - id: mfc4 MassFlowController: mass_flow_rate: 0.035 # kg/s diff --git a/configs/sample_configs2.yaml b/configs/sample_configs2.yaml index febb029..5005af0 100644 --- a/configs/sample_configs2.yaml 
+++ b/configs/sample_configs2.yaml @@ -4,28 +4,27 @@ metadata: version: "2.0" author: "Boulder Configuration System" -simulation: - mechanism: "gri30.yaml" +phases: + gas: + mechanism: gri30.yaml +settings: time_step: 0.001 # s max_time: 5.0 # s - solver: "CVODE_BDF" - relative_tolerance: 1.0e-6 - absolute_tolerance: 1.0e-9 + relative_tolerance: 1.0e-06 + absolute_tolerance: 1.0e-09 max_steps: 10000 -components: +nodes: - id: reactor1 IdealGasReactor: temperature: 1200 # K pressure: 101325 # Pa composition: "CH4:1,O2:2,N2:7.52" volume: 0.01 # m³ - - id: res1 Reservoir: temperature: 300 # K composition: "O2:1,N2:3.76" - - id: res2 Reservoir: temperature: 350 # K @@ -38,7 +37,6 @@ connections: mass_flow_rate: 0.05 # kg/s source: res1 target: reactor1 - - id: mfc2 MassFlowController: mass_flow_rate: 0.02 # kg/s diff --git a/docs/conf.py b/docs/conf.py index 54649b4..0ef8211 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -40,6 +40,7 @@ # Intersphinx: Reference other packages intersphinx_mapping = { "cantera": ("https://www.cantera.org/documentation/docs-2.6/sphinx/html/", None), + "ctwrap": ("https://microcombustion.github.io/ctwrap/", None), "numpy": ("https://numpy.org/doc/stable/", None), "matplotlib": ("https://matplotlib.org/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), diff --git a/environment.yml b/environment.yml index f85b629..f73e784 100644 --- a/environment.yml +++ b/environment.yml @@ -15,3 +15,5 @@ dependencies: - numpy - pandas - plotly +- pint +- pydantic diff --git a/pyproject.toml b/pyproject.toml index 6bff025..dbf112d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,8 @@ dependencies = [ "dash-cytoscape>=0.3.0", "cantera>=3.0.0", "python-dotenv>=1.0.0", - "PyYAML>=6.0" + "PyYAML>=6.0", + "pydantic>=2.0.0" ] description = "A visual interface for Cantera reactor networks" dynamic = ["version"] diff --git a/tests/test_config_validation.py b/tests/test_config_validation.py new file mode 100644 index 
0000000..7692283 --- /dev/null +++ b/tests/test_config_validation.py @@ -0,0 +1,236 @@ +"""Validation tests for configuration YAML files. + +This suite verifies: +- All repository configs in `configs/` validate successfully after normalization. +- Intentionally broken examples under `tests/test_data/invalid/` raise errors. +- Unit-bearing strings are coerced to canonical magnitudes consistent with CtWrap. +""" + +from __future__ import annotations + +import glob +import os +from typing import List + +import pytest + +from boulder.config import load_config_file, normalize_config +from boulder.validation import validate_normalized_config + + +@pytest.mark.unit +def test_all_repo_configs_validate() -> None: + """Assert that every YAML file under `configs/` validates after normalization. + + This ensures library-provided examples remain compatible with the current + normalization and validation schema. + """ + repo_root = os.path.dirname(os.path.dirname(__file__)) + configs_dir = os.path.join(repo_root, "configs") + if not os.path.isdir(configs_dir): + pytest.skip("No repo-level configs directory found") + + yaml_paths: List[str] = [] + yaml_paths.extend(glob.glob(os.path.join(configs_dir, "*.yaml"))) + yaml_paths.extend(glob.glob(os.path.join(configs_dir, "*.yml"))) + + assert yaml_paths, "No YAML files found under configs/" + + failures: List[str] = [] + for path in yaml_paths: + try: + loaded = load_config_file(path) + normalized = normalize_config(loaded) + validate_normalized_config(normalized) + except Exception as exc: # noqa: BLE001 - want the full failure list + failures.append(f"{path}: {exc}") + + assert not failures, ( + "Some configs failed validation after normalization:\n" + "\n".join(failures) + ) + + +@pytest.mark.unit +def test_invalid_configs_fail_validation() -> None: + """Assert that known-invalid configs raise during validation. 
+ + The fixtures capture three failure modes: + - duplicate_node_id.yaml: duplicate node identifiers (cross-invariant) + - unknown_connection_ref.yaml: a connection references a missing node + - missing_properties_node.yaml: `properties` is not a mapping after normalization + """ + invalid_dir = os.path.join(os.path.dirname(__file__), "test_data", "invalid") + if not os.path.isdir(invalid_dir): + pytest.skip("No invalid test_data present") + + yaml_paths: List[str] = [] + yaml_paths.extend(glob.glob(os.path.join(invalid_dir, "*.yaml"))) + yaml_paths.extend(glob.glob(os.path.join(invalid_dir, "*.yml"))) + + assert yaml_paths, "No invalid YAML files found under tests/test_data/invalid/" + + for path in yaml_paths: + loaded = load_config_file(path) + normalized = normalize_config(loaded) + with pytest.raises(Exception): + validate_normalized_config(normalized) + + +@pytest.mark.unit +def test_unit_coercion_ctwrap_compatibility() -> None: + """Unit-bearing strings are coerced to magnitudes in canonical units. 
+ + - temperature strings (e.g., "500 degC") become Celsius magnitudes + - pressure strings (e.g., "1 atm") become Pascals + - simulation dt strings (e.g., "10 ms") become seconds + """ + data = { + "nodes": [ + { + "id": "r1", + "type": "IdealGasReactor", + "properties": { + "temperature": "500 degC", + "pressure": "1 atm", + }, + } + ], + "connections": [], + "simulation": {"dt": "10 ms"}, + } + + normalized = normalize_config(data) + model = validate_normalized_config(normalized) + + # temperature: 500 degC = 500 C (no conversion needed, already in target unit) + assert abs(model.nodes[0].properties["temperature"] - 500.0) < 1e-6 + # pressure: 1 atm = 101325 Pa + assert abs(model.nodes[0].properties["pressure"] - 101325.0) < 1e-6 + # dt: 10 ms = 0.01 s + assert model.simulation is not None + assert abs(getattr(model.simulation, "dt") - 0.01) < 1e-12 + + +@pytest.mark.unit +def test_power_unit_coercion() -> None: + """Power units like kilowatt are properly converted to kilowatts.""" + data = { + "nodes": [ + { + "id": "r1", + "type": "IdealGasReactor", + "properties": {"temperature": "500 K", "pressure": "1 atm"}, + }, + { + "id": "r2", + "type": "IdealGasReactor", + "properties": {"temperature": "300 K", "pressure": "1 atm"}, + }, + ], + "connections": [ + { + "id": "wall1", + "type": "Wall", + "source": "r1", + "target": "r2", + "properties": { + "electric_power_kW": "550 kilowatt", # This should work now + }, + } + ], + } + + normalized = normalize_config(data) + model = validate_normalized_config(normalized) + + # electric_power_kW: 550 kilowatt = 550 kW (stays in kW due to special mapping) + assert abs(model.connections[0].properties["electric_power_kW"] - 550.0) < 1e-6 + + +@pytest.mark.unit +def test_invalid_unit_error_message() -> None: + """Invalid units should provide helpful error messages with suggestions.""" + data = { + "nodes": [ + { + "id": "r1", + "type": "IdealGasReactor", + "properties": { + "temperature": "500 invalid_unit", # This should fail
with helpful message + }, + } + ], + "connections": [], + } + + normalized = normalize_config(data) + + with pytest.raises(ValueError) as exc_info: + validate_normalized_config(normalized) + + error_msg = str(exc_info.value) + assert "Could not convert '500 invalid_unit'" in error_msg + assert "temperature units like 'degC', 'degF', 'K'" in error_msg + assert "for property 'temperature'" in error_msg + + +@pytest.mark.unit +def test_dynamic_unit_system_flexibility() -> None: + """Test that the dynamic unit system can handle various unit types without hardcoding.""" + data = { + "nodes": [ + { + "id": "r1", + "type": "IdealGasReactor", + "properties": { + "temperature": "300 K", # Already in target unit + "pressure": "2 bar", # Different pressure unit + "mass": "5 g", # Mass in grams + "volume": "2 L", # Volume in liters + "custom_length": "10 cm", # Custom property with length units + "custom_energy": "1 kJ", # Custom property with energy units + }, + } + ], + "connections": [ + { + "id": "mfc1", + "type": "MassFlowController", + "source": "r1", + "target": "r1", + "properties": { + "mass_flow_rate": "0.1 g/min", # Flow rate in different units + "custom_power": "500 W", # Custom power property + }, + } + ], + "simulation": { + "dt": "1 ms", # Time in milliseconds + "end_time": "10 min", # Time in minutes + }, + } + + normalized = normalize_config(data) + model = validate_normalized_config(normalized) + + # Check that all units were converted to their canonical forms + node = model.nodes[0] + assert abs(node.properties["temperature"] - 26.85) < 1e-6 # 300 K = 26.85 C + assert abs(node.properties["pressure"] - 200000.0) < 1e-6 # 2 bar = 200000 Pa + assert abs(node.properties["mass"] - 0.005) < 1e-6 # 5 g = 0.005 kg + assert abs(node.properties["volume"] - 0.002) < 1e-6 # 2 L = 0.002 m³ + assert abs(node.properties["custom_length"] - 0.1) < 1e-6 # 10 cm = 0.1 m + assert abs(node.properties["custom_energy"] - 1000.0) < 1e-6 # 1 kJ = 1000 J + + conn = 
model.connections[0] + assert ( + abs(conn.properties["mass_flow_rate"] - 0.1 / 60 / 1000) < 1e-9 + ) # 0.1 g/min to kg/s + assert ( + abs(conn.properties["custom_power"] - 500.0) < 1e-6 + ) # 500 W (no conversion needed) + + # Check simulation properties + sim_dict = model.simulation.__dict__ + assert abs(sim_dict["dt"] - 0.001) < 1e-6 # 1 ms = 0.001 s + assert abs(sim_dict["end_time"] - 600.0) < 1e-6 # 10 min = 600 s diff --git a/tests/test_data/invalid/duplicate_node_id.yaml b/tests/test_data/invalid/duplicate_node_id.yaml new file mode 100644 index 0000000..b458ab4 --- /dev/null +++ b/tests/test_data/invalid/duplicate_node_id.yaml @@ -0,0 +1,9 @@ +# Invalid: duplicate node id +nodes: +- id: r1 + IdealGasReactor: + temperature: 300 +- id: r1 + IdealGasReactor: + temperature: 400 +connections: [] diff --git a/tests/test_data/invalid/missing_properties_node.yaml b/tests/test_data/invalid/missing_properties_node.yaml new file mode 100644 index 0000000..8c4ad76 --- /dev/null +++ b/tests/test_data/invalid/missing_properties_node.yaml @@ -0,0 +1,6 @@ +# Invalid: properties not a mapping in normalized form +nodes: +- id: r1 + type: IdealGasReactor + properties: 42 +connections: [] diff --git a/tests/test_data/invalid/unknown_connection_ref.yaml b/tests/test_data/invalid/unknown_connection_ref.yaml new file mode 100644 index 0000000..76d2c32 --- /dev/null +++ b/tests/test_data/invalid/unknown_connection_ref.yaml @@ -0,0 +1,11 @@ +# Invalid: connection references unknown nodes +nodes: +- id: r1 + IdealGasReactor: + temperature: 300 +connections: +- id: c1 + source: r1 + target: r2 # r2 does not exist + MassFlowController: + mass_flow_rate: 0.001 diff --git a/tests/test_unit.py b/tests/test_unit.py index 57594b3..25a92a6 100644 --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -8,6 +8,7 @@ from boulder.layout import get_layout from boulder.styles import CYTOSCAPE_STYLESHEET from boulder.utils import config_to_cyto_elements +from boulder.validation import 
validate_normalized_config @pytest.mark.unit @@ -24,6 +25,26 @@ def test_get_initial_config_structure(self): assert isinstance(config["nodes"], list) assert isinstance(config["connections"], list) + def test_validate_normalized_config(self): + """Validate config post-normalization without building network.""" + config = { + "nodes": [ + {"id": "r1", "type": "IdealGasReactor", "properties": {}}, + {"id": "r2", "type": "IdealGasReactor", "properties": {}}, + ], + "connections": [ + { + "id": "c1", + "type": "MassFlowController", + "source": "r1", + "target": "r2", + "properties": {}, + } + ], + } + model = validate_normalized_config(config) + assert model.nodes[0].id == "r1" + def test_get_initial_config_components(self): """Test initial config components have required fields.""" config = get_initial_config() diff --git a/tests/test_yaml_comment_system.py b/tests/test_yaml_comment_system.py index af1fb5e..887ac21 100644 --- a/tests/test_yaml_comment_system.py +++ b/tests/test_yaml_comment_system.py @@ -24,6 +24,7 @@ normalize_config, yaml_to_string_with_comments, ) +from boulder.validation import validate_normalized_config class TestYAMLCommentCore: @@ -228,6 +229,8 @@ def test_yaml_to_internal_to_stone_roundtrip(self, sample_yaml_with_comments): # Normalize to internal format internal_config = normalize_config(loaded_data) + # Validate post-normalization + validate_normalized_config(internal_config) # Verify internal format is correct assert internal_config["nodes"][0]["type"] == "IdealGasReactor" @@ -295,6 +298,8 @@ def test_stone_format_round_trip_with_comments(self, sample_yaml_with_comments): # Simulate the full application workflow for config processing original_data = load_yaml_string_with_comments(sample_yaml_with_comments) internal_config = normalize_config(original_data) + # Validate post-normalization + validate_normalized_config(internal_config) stone_config = convert_to_stone_format(internal_config) updated_data = 
_update_yaml_preserving_comments(original_data, stone_config) final_yaml = yaml_to_string_with_comments(updated_data)