diff --git a/Justfile b/Justfile
index 4ed10d7d..c748ccb0 100644
--- a/Justfile
+++ b/Justfile
@@ -23,7 +23,7 @@ clean:
 # install with all deps (and setup conda env with readdy)
 install:
     conda env update --file environment.yml
-    pip install -e .[lint,test,docs,dev,mcell,physicell,md,cellpack]
+    pip install -e .[lint,test,docs,dev,mcell,physicell,md,cellpack,mem3dg]
 
 # lint, format, and check all files
 lint:
diff --git a/examples/Tutorial_mem3dg.ipynb b/examples/Tutorial_mem3dg.ipynb
new file mode 100644
index 00000000..3ca6a8f1
--- /dev/null
+++ b/examples/Tutorial_mem3dg.ipynb
@@ -0,0 +1,170 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Simularium Conversion Tutorial : Mem3DG .nc Data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from IPython.display import Image\n",
+    "\n",
+    "import numpy as np\n",
+    "\n",
+    "from simulariumio.mem3dg import Mem3dgConverter, Mem3dgData\n",
+    "from simulariumio import MetaData, CameraData, UnitData"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This notebook provides example python code for converting your own simulation trajectories into the format consumed by the Simularium Viewer. It creates a .simularium file which you can drag and drop onto the viewer like this:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "![title](img/drag_drop.gif)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# _Note:_\n",
+    "To install simulariumio with all dependencies needed for Mem3DG conversion, use `pip install simulariumio[mem3dg]`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "***\n",
+    "## Prepare your spatial data"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The Simularium `Mem3dgConverter` consumes spatiotemporal data from Mem3DG .nc output files using the netCDF4 Python package. \n",
+    "\n",
+    "The converter requires a `Mem3dgData` object as a parameter.\n",
+    "\n",
+    "Unlike other simulariumio converters, the `Mem3dgConverter` will generate several `.obj` files (one per frame) in addition to the `.simularium` file. Use `output_obj_file_path` to specify where the generated .obj files should be saved.\n",
+    "\n",
+    "The test input .nc data for this example was created using [Mem3DG's Jupyter Notebook tutorials](https://github.com/RangamaniLabUCSD/Mem3DG/blob/main/tests/python/tutorial/tutorial1.ipynb)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "box_size = 5.\n",
+    "scale_factor = 10\n",
+    "\n",
+    "example_data = Mem3dgData(\n",
+    "    input_file_path=\"../simulariumio/tests/data/mem3dg/traj.nc\",\n",
+    "    output_obj_file_path=\".\",\n",
+    "    meta_data=MetaData(\n",
+    "        box_size=np.array([box_size, box_size, box_size]),\n",
+    "        trajectory_title=\"Some parameter set\",\n",
+    "        camera_defaults=CameraData(position=np.array([0, 0, 200])),\n",
+    "        scale_factor=scale_factor\n",
+    "    ),\n",
+    "    agent_color=\"#a38fba\",\n",
+    "    agent_name=\"my-object\",\n",
+    "    time_units=UnitData(\"us\", 0.2),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Convert and save as .simularium file"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once your data is shaped like the `example_data` object, you can use the converter to generate the file at the given path"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Reading Mem3DG Data -------------\n",
+      "Converting Trajectory Data to JSON -------------\n",
+      "Writing JSON -------------\n",
+      "saved to example_mem3dg.simularium\n"
+     ]
+    }
+   ],
+   "source": [
+    "converter = Mem3dgConverter(example_data).save(\"example_mem3dg\", binary=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualize in the Simularium viewer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In a supported web browser (Firefox or Chrome), navigate to https://simularium.allencell.org/ and import your file into the viewer.\n",
+    "\n",
+    "**Note:** In order to view your data in Simularium, you must import the generated .simularium file _and_ all of the generated .obj files!"
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pyproject.toml b/pyproject.toml index 8c3dd720..23ecb68e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,9 @@ cellpack = [ nerdss = [ "MDAnalysis>=2.0.0", ] +mem3dg = [ + "netCDF4", +] tutorial = [ "jupyter", "scipy>=1.5.2", diff --git a/simulariumio/mem3dg/__init__.py b/simulariumio/mem3dg/__init__.py new file mode 100644 index 00000000..9a11e489 --- /dev/null +++ b/simulariumio/mem3dg/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from .mem3dg_converter import Mem3dgConverter # noqa: F401 +from .mem3dg_data import Mem3dgData # noqa: F401 diff --git a/simulariumio/mem3dg/mem3dg_converter.py b/simulariumio/mem3dg/mem3dg_converter.py new file mode 100644 index 00000000..c05036fd --- /dev/null +++ b/simulariumio/mem3dg/mem3dg_converter.py @@ -0,0 +1,104 @@ +from netCDF4 import Dataset +import numpy as np +from pathlib import Path + +from ..trajectory_converter import TrajectoryConverter +from ..data_objects import ( + AgentData, + TrajectoryData, + DimensionData, + DisplayData, +) +from ..constants import DISPLAY_TYPE +from ..exceptions import InputDataError +from .mem3dg_data import Mem3dgData + + +class Mem3dgConverter(TrajectoryConverter): + def __init__( + self, + input_data: Mem3dgData, + ): + """ + Parameters + ---------- + input_data : Mem3dgData + An object containing info for reading + Mem3DG simulation trajectory output + """ + self._data = self._read(input_data) + + def write_to_obj(self, filepath, data, frame): + # Extract XYZ coordinates for vertices + coordinates = np.array( + data.groups["Trajectory"].variables["coordinates"][frame] + ) + coordinates = np.reshape(coordinates, (-1, 3)) + + # Extract indices of vertices to make faces (all triangles) + topology = np.array(data.groups["Trajectory"].variables["topology"][frame]) + topology = np.reshape(topology, (-1, 3)) + # change indices to be 1 indexed instead of 0 indexed for .obj files + topology += 1 + + # Generate one .obj file per frame + with open(filepath, "w") as file: + file.write(f"# Frame {frame}\n") + for v in coordinates: + file.write(f"v {v[0]} {v[1]} {v[2]}\n") + for t in topology: + file.write(f"f {t[0]} {t[1]} {t[2]}\n") + + def _read_traj_data(self, input_data: Mem3dgData) -> AgentData: + try: + data = Dataset(input_data.input_file_path, "r") + n_frames = np.size(data.groups["Trajectory"].variables["time"]) + except Exception as e: + raise InputDataError(f"Error reading input Mem3DG data: {e}") + + # for now, we are representing converted Mem3DG trajectories as one + # unique mesh agent per frame + dimensions = DimensionData(total_steps=n_frames, max_agents=1) + agent_data = AgentData.from_dimensions(dimensions) + agent_data.n_timesteps = n_frames + + base_agent_name = input_data.agent_name or "object" + for frame in range(n_frames): + agent_data.times[frame] = data.groups["Trajectory"].variables["time"][frame] + agent_data.n_agents[frame] = 1 + + output_file_path = Path(input_data.output_obj_file_path) / 
f"{frame}.obj"
+            self.write_to_obj(output_file_path, data, frame)
+
+            agent_data.radii[frame][0] = input_data.meta_data.scale_factor
+            agent_data.unique_ids[frame][0] = frame
+
+            name = str(frame)
+            agent_data.types[frame].append(name)
+            object_display_data = DisplayData(
+                name=f"{base_agent_name}#frame{frame}",
+                display_type=DISPLAY_TYPE.OBJ,
+                url=f"{frame}.obj",
+                color=input_data.agent_color,
+            )
+            agent_data.display_data[name] = object_display_data
+        return agent_data
+
+    def _read(self, input_data: Mem3dgData) -> TrajectoryData:
+        """
+        Return a TrajectoryData object containing the Mem3DG data
+        """
+        print("Reading Mem3DG Data -------------")
+        if input_data.meta_data.scale_factor is None:
+            input_data.meta_data.scale_factor = 1.0
+        agent_data = self._read_traj_data(input_data)
+        input_data.spatial_units.multiply(1.0 / input_data.meta_data.scale_factor)
+        input_data.meta_data._set_box_size()
+        result = TrajectoryData(
+            meta_data=input_data.meta_data,
+            agent_data=agent_data,
+            time_units=input_data.time_units,
+            spatial_units=input_data.spatial_units,
+            plots=input_data.plots,
+        )
+        return result
diff --git a/simulariumio/mem3dg/mem3dg_data.py b/simulariumio/mem3dg/mem3dg_data.py
new file mode 100644
index 00000000..28bd72c8
--- /dev/null
+++ b/simulariumio/mem3dg/mem3dg_data.py
@@ -0,0 +1,65 @@
+from ..data_objects import MetaData, UnitData
+from typing import List, Dict, Any
+
+
+class Mem3dgData:
+    input_file_path: str
+    output_obj_file_path: str
+    meta_data: MetaData
+    agent_name: str
+    agent_color: str
+    time_units: UnitData
+    spatial_units: UnitData
+    plots: List[Dict[str, Any]]
+
+    def __init__(
+        self,
+        input_file_path: str,
+        output_obj_file_path: str = None,
+        meta_data: MetaData = None,
+        agent_name: str = None,
+        agent_color: str = None,
+        time_units: UnitData = None,
+        spatial_units: UnitData = None,
+        plots: List[Dict[str, Any]] = None,
+    ):
+        """
+        Parameters
+        ----------
+        input_file_path : str
+            The path to the .nc file output by Mem3DG for this trajectory.
+        output_obj_file_path : str (optional)
+            The path to the directory where the output .obj files will be
+            saved. If nothing is provided, the output .obj files will be
+            saved to the current directory.
+        meta_data : MetaData
+            An object containing metadata for the trajectory
+            including box size, scale factor, and camera defaults
+        agent_name: str (optional)
+            This converter generates its own DisplayData, but the agent name
+            can optionally be overridden here. This will change the agent
+            name that is displayed in the side panel in Simularium
+        agent_color: string (optional)
+            This converter generates its own DisplayData, but the agent color
+            can optionally be overridden here with a hex value for the color
+            to display, e.g. "#FFFFFF"
+            Default: Use default colors from Simularium Viewer
+        time_units: UnitData (optional)
+            multiplier and unit name for time values
+            Default: 1.0 second
+        spatial_units: UnitData (optional)
+            multiplier and unit name for spatial values
+            (including positions, radii, and box size)
+            Default: 1.0 meter
+        plots : List[Dict[str, Any]] (optional)
+            An object containing plot data already
+            in Simularium format
+        """
+        self.input_file_path = input_file_path
+        self.output_obj_file_path = output_obj_file_path or "."
+ self.meta_data = meta_data or MetaData() + self.agent_name = agent_name + self.agent_color = agent_color + self.time_units = time_units or UnitData("s") + self.spatial_units = spatial_units or UnitData("m") + self.plots = plots or [] diff --git a/simulariumio/tests/converters/test_mem3dg_converter.py b/simulariumio/tests/converters/test_mem3dg_converter.py new file mode 100644 index 00000000..70271a5e --- /dev/null +++ b/simulariumio/tests/converters/test_mem3dg_converter.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import pytest +import numpy as np + +from simulariumio.mem3dg import ( + Mem3dgConverter, + Mem3dgData, +) +from simulariumio import MetaData, UnitData, JsonWriter +from simulariumio.constants import ( + DEFAULT_BOX_SIZE, + DEFAULT_CAMERA_SETTINGS, + VIZ_TYPE, +) + +data = Mem3dgData(input_file_path="simulariumio/tests/data/mem3dg/traj.nc") +converter = Mem3dgConverter(data) +results = JsonWriter.format_trajectory_data(converter._data) + + +# test box data default +@pytest.mark.parametrize( + "box_size, expected_box_size", + [ + ( + results["trajectoryInfo"]["size"], + { + "x": DEFAULT_BOX_SIZE[0], + "y": DEFAULT_BOX_SIZE[1], + "z": DEFAULT_BOX_SIZE[2], + }, + ) + ], +) +def test_box_size_default(box_size, expected_box_size): + assert box_size == expected_box_size + + +# test type mapping default +@pytest.mark.parametrize( + "typeMapping, expected_typeMapping", + [ + ( + results["trajectoryInfo"]["typeMapping"], + { + "0": { + "name": "object#frame0", + "geometry": { + "displayType": "OBJ", + "url": "0.obj", + }, + }, + "1": { + "name": "object#frame1", + "geometry": { + "displayType": "OBJ", + "url": "1.obj", + }, + }, + "10": { + "name": "object#frame10", + "geometry": { + "displayType": "OBJ", + "url": "10.obj", + }, + }, + "11": { + "name": "object#frame11", + "geometry": { + "displayType": "OBJ", + "url": "11.obj", + }, + }, + "2": { + "name": "object#frame2", + "geometry": { + "displayType": "OBJ", + "url": "2.obj", + }, + }, + "3": { + "name": "object#frame3", + "geometry": { + "displayType": "OBJ", + "url": "3.obj", + }, + }, + "4": { + "name": "object#frame4", + "geometry": { + "displayType": "OBJ", + "url": "4.obj", + }, + }, + "5": { + "name": "object#frame5", + "geometry": { + "displayType": "OBJ", + "url": "5.obj", + }, + }, + "6": { + "name": "object#frame6", + "geometry": { + "displayType": "OBJ", + "url": "6.obj", + }, + }, + "7": { + "name": "object#frame7", + "geometry": { + "displayType": "OBJ", + "url": "7.obj", + }, + }, + "8": { + "name": "object#frame8", + "geometry": { + "displayType": "OBJ", + "url": "8.obj", + }, + }, + "9": { + "name": "object#frame9", + "geometry": { + "displayType": "OBJ", + "url": "9.obj", + }, + }, + }, + ) + ], +) +def test_typeMapping_default(typeMapping, expected_typeMapping): + assert expected_typeMapping == typeMapping + + +# test default camera settings +@pytest.mark.parametrize( + "camera_settings, expected_camera_settings", + [ + ( + results["trajectoryInfo"]["cameraDefault"], + { + "position": { + "x": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[0], + "y": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[1], + "z": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[2], + }, + "lookAtPosition": { + "x": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[0], + "y": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[1], + "z": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[2], + }, + "upVector": { + "x": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[0], + "y": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[1], + "z": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[2], + }, + 
"fovDegrees": DEFAULT_CAMERA_SETTINGS.FOV_DEGREES, + }, + ) + ], +) +def test_camera_setting_default(camera_settings, expected_camera_settings): + assert camera_settings == expected_camera_settings + + +# test time units default +@pytest.mark.parametrize( + "timeUnits, expected_timeUnits", + [ + ( + results["trajectoryInfo"]["timeUnits"], + { + "magnitude": 1.0, + "name": "s", + }, + ) + ], +) +def test_timeUnits_default(timeUnits, expected_timeUnits): + assert timeUnits == expected_timeUnits + + +# test spatial units default +expected_spatial_units = UnitData("m", 1.0) + + +@pytest.mark.parametrize( + "spatialUnits, expected_spatialUnits", + [ + ( + results["trajectoryInfo"]["spatialUnits"], + { + "magnitude": expected_spatial_units.magnitude, + "name": expected_spatial_units.name, + }, + ) + ], +) +def test_spatialUnits_default(spatialUnits, expected_spatialUnits): + assert spatialUnits == expected_spatialUnits + + +box_size = 2.0 +data_with_meta_data = Mem3dgData( + input_file_path="simulariumio/tests/data/mem3dg/traj.nc", + meta_data=MetaData( + box_size=np.array([box_size, box_size, box_size]), + ), +) +converter_meta_data = Mem3dgConverter(data_with_meta_data) +results_meta_data = JsonWriter.format_trajectory_data(converter_meta_data._data) + + +# test box size provided +@pytest.mark.parametrize( + "box_size, expected_box_size", + [ + ( + results_meta_data["trajectoryInfo"]["size"], + { + "x": box_size, + "y": box_size, + "z": box_size, + }, + ) + ], +) +def test_box_size_provided(box_size, expected_box_size): + # if a box size is provided, we should use it + assert box_size == expected_box_size + + +time_unit_name = "ns" +time_unit_value = 1.0 +spatial_unit_name = "nm" +data_with_unit_data = Mem3dgData( + input_file_path="simulariumio/tests/data/mem3dg/traj.nc", + time_units=UnitData(time_unit_name, time_unit_value), + spatial_units=UnitData(spatial_unit_name), +) +converter_unit_data = Mem3dgConverter(data_with_unit_data) +results_unit_data = JsonWriter.format_trajectory_data(converter_unit_data._data) + + +# test time units provided +@pytest.mark.parametrize( + "timeUnits, expected_timeUnits", + [ + ( + results_unit_data["trajectoryInfo"]["timeUnits"], + { + "magnitude": time_unit_value, + "name": time_unit_name, + }, + ) + ], +) +def test_timeUnits_provided(timeUnits, expected_timeUnits): + assert timeUnits == expected_timeUnits + + +# test spatial units provided +expected_spatial_units = UnitData(spatial_unit_name, 1.0) + + +@pytest.mark.parametrize( + "spatialUnits, expected_spatialUnits", + [ + ( + results_unit_data["trajectoryInfo"]["spatialUnits"], + { + "magnitude": expected_spatial_units.magnitude, + "name": expected_spatial_units.name, + }, + ) + ], +) +def test_spatialUnits_provided(spatialUnits, expected_spatialUnits): + assert spatialUnits == expected_spatialUnits + + +color = "#dfdacd" +name = "testname" + +data_with_optional_data = Mem3dgData( + input_file_path="simulariumio/tests/data/mem3dg/traj.nc", + meta_data=MetaData( + box_size=np.array([box_size, box_size, box_size]), + ), + agent_color=color, + agent_name=name, +) +converter_optional_data = Mem3dgConverter(data_with_optional_data) +results_optional_data = JsonWriter.format_trajectory_data(converter_optional_data._data) + + +# test type mapping with some optional data provided +@pytest.mark.parametrize( + "typeMapping, expected_typeMapping", + [ + ( + results_optional_data["trajectoryInfo"]["typeMapping"]["1"], + { + "name": f"{name}#frame1", + "geometry": {"displayType": "OBJ", "color": color, "url": 
"1.obj"}, + }, + ), + ( + results_optional_data["trajectoryInfo"]["typeMapping"]["7"], + { + "name": f"{name}#frame7", + "geometry": {"displayType": "OBJ", "color": color, "url": "7.obj"}, + }, + ), + ], +) +def test_typeMapping_with_display_data(typeMapping, expected_typeMapping): + assert expected_typeMapping == typeMapping + + +def test_agent_ids(): + assert JsonWriter._check_agent_ids_are_unique_per_frame(results_optional_data) + + +@pytest.mark.parametrize( + "bundleData, expected_bundleData", + [ + ( + results_optional_data["spatialData"]["bundleData"][0], + [ + VIZ_TYPE.DEFAULT, # first agent + 0.0, # id + 0.0, # type + 0.0, # x + 0.0, # y + 0.0, # z + 0.0, # x rotation + 0.0, # y rotation + 0.0, # z rotation + 1.0, # radius + 0.0, # number of subpoints + ], + ) + ], +) +def test_bundleData(bundleData, expected_bundleData): + assert np.isclose(expected_bundleData, bundleData["data"]).all() + + +# test the generated .obj file +vertices = [] +faces = [] +with open("1.obj", "r") as f: + for line in f: + if line.startswith("v "): + vertex = list(map(float, line.strip().split()[1:])) + vertices.append(vertex) + if line.startswith("f"): + face = list(map(int, line.strip().split()[1:])) + faces.append(face) + + +# just test first 10 vertices and first 10 faces +@pytest.mark.parametrize( + "objData, expected_objData", + [ + ( + vertices[0:10], + [ + [-0.044172474330887876, -0.03292575680180331, -1.4175751745389955], + [-0.19121607468987623, -0.01397969566814584, -1.3875799727888973], + [-0.11250378749193095, -0.17990264986600266, -1.379482681435413], + [0.05576838982803812, -0.14570813108980143, -1.399674042301235], + [0.15558132766053154, -0.25183807392352425, -1.3353307393866578], + [0.18377789077849535, -0.058222036646119635, -1.3871295603540683], + [-0.07103420756652815, 0.12890346705753208, -1.4027636325505357], + [-0.21819128564743445, 0.1535233470313994, -1.3532338908453854], + [0.08002216511514706, 0.05349158797718862, -1.414705819367836], + [0.21652298025580569, 0.12117631622076441, -1.3635688485379431], + ], + ), + ( + faces[0:10], + [ + [3, 2, 1], + [3, 1, 4], + [3, 4, 18], + [6, 5, 4], + [7, 1, 2], + [7, 2, 8], + [1, 9, 4], + [7, 9, 1], + [6, 4, 9], + [6, 9, 10], + ], + ), + ], +) +def test_obj_data_values(objData, expected_objData): + assert np.isclose(objData, expected_objData).all() + + +def test_obj_data_faces(): + # values in the faces list are 1 based indices to the vertices, + # so all values should be between 1 and the size of vertices + assert min(min(faces)) >= 1 + assert max(max(faces)) <= len(vertices) diff --git a/simulariumio/tests/data/mem3dg/traj.nc b/simulariumio/tests/data/mem3dg/traj.nc new file mode 100644 index 00000000..7fb8fc68 Binary files /dev/null and b/simulariumio/tests/data/mem3dg/traj.nc differ