diff --git a/.gitignore b/.gitignore
index 96ff85895..2559719f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,7 +9,6 @@ debug.py
*.ipynb_checkpoints/
motions/
*_rerun*
-*.mp4
artifacts/
.venv/
render_robots.py
diff --git a/README.md b/README.md
index 1a6a1c867..79914551e 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@ curl -LsSf https://astral.sh/uv/install.sh | sh
Run the demo (no installation needed):
```bash
-uvx --from mjlab --with "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad" demo
+uvx --from mjlab --with "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6" demo
```
This launches an interactive viewer with a pre-trained Unitree G1 agent tracking a reference dance motion in MuJoCo Warp.
@@ -56,7 +56,7 @@ uv run demo
**From PyPI:**
```bash
-uv add mjlab "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad"
+uv add mjlab "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6"
```
A Dockerfile is also provided.
diff --git a/docs/index.rst b/docs/index.rst
index d94fde1ba..3cb02fa06 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -23,7 +23,7 @@ You can try mjlab *without installing anything* by using `uvx`:
# Run the mjlab demo (no local installation needed)
uvx --from mjlab \
- --with "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad" \
+ --with "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6" \
demo
If this runs, your setup is compatible with mjlab *for evaluation*.
@@ -90,4 +90,5 @@ Table of Contents
source/observation
source/actuators
source/sensors
+ source/raycast_sensor
source/distributed_training
diff --git a/docs/source/_static/pattern_grid.jpg b/docs/source/_static/pattern_grid.jpg
new file mode 100644
index 000000000..a259ad111
Binary files /dev/null and b/docs/source/_static/pattern_grid.jpg differ
diff --git a/docs/source/_static/pattern_grid.mp4 b/docs/source/_static/pattern_grid.mp4
new file mode 100644
index 000000000..65d4ae83d
Binary files /dev/null and b/docs/source/_static/pattern_grid.mp4 differ
diff --git a/docs/source/_static/pattern_pinhole.mp4 b/docs/source/_static/pattern_pinhole.mp4
new file mode 100644
index 000000000..10c12f0e3
Binary files /dev/null and b/docs/source/_static/pattern_pinhole.mp4 differ
diff --git a/docs/source/_static/ray_alignment_comparison.mp4 b/docs/source/_static/ray_alignment_comparison.mp4
new file mode 100644
index 000000000..97c30b114
Binary files /dev/null and b/docs/source/_static/ray_alignment_comparison.mp4 differ
diff --git a/docs/source/_static/raycast_demo.mp4 b/docs/source/_static/raycast_demo.mp4
new file mode 100644
index 000000000..c0ba6a2fb
Binary files /dev/null and b/docs/source/_static/raycast_demo.mp4 differ
diff --git a/docs/source/installation.rst b/docs/source/installation.rst
index 62ef9158c..67002aece 100644
--- a/docs/source/installation.rst
+++ b/docs/source/installation.rst
@@ -92,7 +92,7 @@ install. These options are interchangeable: you can switch at any time.
.. code:: bash
- uv add mjlab "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad"
+ uv add mjlab "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6"
.. note::
@@ -104,7 +104,7 @@ install. These options are interchangeable: you can switch at any time.
.. code:: bash
- uv add "mjlab @ git+https://github.com/mujocolab/mjlab" "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad"
+ uv add "mjlab @ git+https://github.com/mujocolab/mjlab" "mujoco-warp @ git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6"
.. note::
@@ -201,7 +201,7 @@ Install mjlab and dependencies via pip
.. code:: bash
- pip install git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad
+ pip install git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6
pip install mjlab
.. tab-item:: Source
@@ -210,7 +210,7 @@ Install mjlab and dependencies via pip
.. code:: bash
- pip install git+https://github.com/google-deepmind/mujoco_warp@55ab24595ea4939ff583f1910e1615361d49d9ad
+ pip install git+https://github.com/google-deepmind/mujoco_warp@1c6b31692eaa5d3134093958d839165245d196a6
git clone https://github.com/mujocolab/mjlab.git
cd mjlab
pip install -e .
diff --git a/docs/source/raycast_sensor.rst b/docs/source/raycast_sensor.rst
new file mode 100644
index 000000000..0d2d9f078
--- /dev/null
+++ b/docs/source/raycast_sensor.rst
@@ -0,0 +1,346 @@
+.. _raycast_sensor:
+
+RayCast Sensor
+==============
+
+The RayCast sensor provides GPU-accelerated raycasting for terrain scanning,
+obstacle detection, and depth sensing. It supports flexible ray patterns,
+multiple frame attachment options, and configurable alignment modes.
+
+.. raw:: html
+
+   <video autoplay loop muted playsinline width="600">
+     <source src="_static/raycast_demo.mp4" type="video/mp4">
+   </video>
+
+
+Quick Start
+-----------
+
+.. code-block:: python
+
+   from mjlab.sensor import RayCastSensorCfg, GridPatternCfg, ObjRef
+
+   raycast_cfg = RayCastSensorCfg(
+       name="terrain_scan",
+       frame=ObjRef(type="body", name="base", entity="robot"),
+       pattern=GridPatternCfg(size=(1.0, 1.0), resolution=0.1),
+       max_distance=5.0,
+   )
+
+   scene_cfg = SceneCfg(
+       entities={"robot": robot_cfg},
+       sensors=(raycast_cfg,),
+   )
+
+   # Access at runtime.
+   sensor = env.scene["terrain_scan"]
+   data = sensor.data
+   distances = data.distances  # [B, N] distance to hit, -1 if miss
+   hit_pos = data.hit_pos_w    # [B, N, 3] world-space hit positions
+   normals = data.normals_w    # [B, N, 3] surface normals
+
+
+Ray Patterns
+------------
+
+Ray patterns define the spatial distribution and direction of rays emitted
+from the sensor frame. Two pattern types are available for different use cases.
+
+Grid Pattern
+^^^^^^^^^^^^
+
+Parallel rays arranged in a 2D grid with fixed spatial resolution.
+
+.. image:: _static/pattern_grid.jpg
+ :width: 600
+ :align: center
+ :alt: Parallel grid ray pattern with fixed footprint
+
+.. note::
+
+ The grid pattern produces a *fixed ground footprint* that does not change
+ with sensor height. Ray spacing is defined in world units (meters).
+
+.. raw:: html
+
+   <video autoplay loop muted playsinline width="600">
+     <source src="_static/pattern_grid.mp4" type="video/mp4">
+   </video>
+
+.. code-block:: python
+
+   from mjlab.sensor import GridPatternCfg
+
+   pattern = GridPatternCfg(
+       size=(1.0, 1.0),             # Grid dimensions in meters
+       resolution=0.1,              # Spacing between rays
+       direction=(0.0, 0.0, -1.0),  # Ray direction (down)
+   )
+
+**Characteristics:**
+
+- All rays are parallel
+- Spacing defined in meters
+- Ground footprint is height-invariant
+- Good for: height maps, terrain scanning, spatially uniform sampling
+
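+The ray count follows from ``size`` and ``resolution``: rays are placed every
+``resolution`` meters along each axis, endpoints included. A quick sketch of
+the arithmetic (``grid_ray_count`` is an illustrative helper, not part of the
+API):
+
+.. code-block:: python
+
+   def grid_ray_count(size: tuple[float, float], resolution: float) -> int:
+       # One ray every `resolution` meters, both endpoints included.
+       nx = int(size[0] / resolution) + 1
+       ny = int(size[1] / resolution) + 1
+       return nx * ny
+
+   # A 1.0 m x 1.0 m grid at 0.1 m spacing yields 11 x 11 = 121 rays.
+   assert grid_ray_count((1.0, 1.0), 0.1) == 121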
+
+Pinhole Camera Pattern
+^^^^^^^^^^^^^^^^^^^^^^
+
+Diverging rays emitted from a single origin, analogous to a depth camera or
+LiDAR sensor.
+
+.. raw:: html
+
+   <video autoplay loop muted playsinline width="600">
+     <source src="_static/pattern_pinhole.mp4" type="video/mp4">
+   </video>
+
+.. note::
+
+ Unlike the grid pattern, the pinhole pattern has a *fixed angular field of view*.
+ As the sensor moves higher, the ground coverage increases.
+
+.. code-block:: python
+
+   from mjlab.sensor import PinholeCameraPatternCfg
+
+   # Explicit parameters.
+   pattern = PinholeCameraPatternCfg(
+       width=16,
+       height=12,
+       fovy=45.0,  # Vertical FOV in degrees
+   )
+
+   # From a MuJoCo camera.
+   pattern = PinholeCameraPatternCfg.from_mujoco_camera("robot/depth_cam")
+
+   # From intrinsic matrix.
+   pattern = PinholeCameraPatternCfg.from_intrinsic_matrix(
+       intrinsic_matrix=[500, 0, 320, 0, 500, 240, 0, 0, 1],
+       width=640,
+       height=480,
+   )
+
+**Characteristics:**
+
+- Rays diverge from a single point
+- Coverage defined in angular units (degrees)
+- Ground footprint increases with height
+- Good for: depth cameras, LiDAR, perspective sensing
+
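+The footprint-height relationship is simple trigonometry. A small sketch
+(``footprint_width`` is an illustrative helper, not part of the API):
+
+.. code-block:: python
+
+   import math
+
+   def footprint_width(height_m: float, fov_deg: float) -> float:
+       # Ground coverage of a downward-facing pinhole pattern: grows
+       # linearly with height for a fixed angular field of view.
+       return 2.0 * height_m * math.tan(math.radians(fov_deg) / 2.0)
+
+   print(footprint_width(1.0, 45.0))  # ~0.83 m
+   print(footprint_width(2.0, 45.0))  # ~1.66 m, doubles with height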
+
+Pattern Comparison
+^^^^^^^^^^^^^^^^^^
+
+.. list-table::
+ :header-rows: 1
+ :widths: 20 40 40
+
+ * - Aspect
+ - Grid
+ - Pinhole
+ * - Ray direction
+ - Parallel
+ - Diverging
+ * - Spacing unit
+ - Meters
+ - Degrees (FOV)
+ * - Height affects coverage
+ - No
+ - Yes
+ * - Projection model
+ - Orthographic
+ - Perspective
+
+
+Frame Attachment
+----------------
+
+Rays attach to a frame in the scene via ``ObjRef``.
+
+.. code-block:: python
+
+ frame = ObjRef(type="body", name="base", entity="robot")
+ frame = ObjRef(type="site", name="scan_site", entity="robot")
+ frame = ObjRef(type="geom", name="sensor_mount", entity="robot")
+
+The ``exclude_parent_body`` option (default: ``True``) prevents rays from
+hitting the body to which they are attached.
+
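+Sites are convenient when the sensor should sit at an offset from the body
+origin. A minimal sketch using ``MjSpec`` (the site name and offset are
+illustrative):
+
+.. code-block:: python
+
+   import mujoco
+
+   spec = mujoco.MjSpec()
+   base = spec.worldbody.add_body(name="base")
+   # Place the scan origin 10 cm forward of and 5 cm above the body origin.
+   base.add_site(name="scan_site", pos=[0.1, 0.0, 0.05])
+
+   # Rays then attach via:
+   # frame = ObjRef(type="site", name="scan_site", entity="robot")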
+
+Ray Alignment
+-------------
+
+The ``ray_alignment`` setting controls how rays orient relative to the frame
+when the body rotates.
+
+.. raw:: html
+
+   <video autoplay loop muted playsinline width="600">
+     <source src="_static/ray_alignment_comparison.mp4" type="video/mp4">
+   </video>
+
+.. list-table::
+ :header-rows: 1
+ :widths: 15 45 40
+
+ * - Mode
+ - Description
+ - Use Case
+ * - ``"base"``
+ - Full position and rotation
+ - Body-mounted sensors
+ * - ``"yaw"``
+ - Ignores pitch and roll
+ - Terrain height maps
+ * - ``"world"``
+ - Fixed world direction
+ - Gravity-aligned sensing
+
+.. code-block:: python
+
+   RayCastSensorCfg(
+       name="height_scan",
+       frame=ObjRef(type="body", name="base", entity="robot"),
+       pattern=GridPatternCfg(size=(1.0, 1.0), resolution=0.1),
+       ray_alignment="yaw",
+   )
+
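+To make the modes concrete, consider a downward ray on a body pitched 30
+degrees (a standalone numeric sketch):
+
+.. code-block:: python
+
+   import math
+
+   import torch
+
+   pitch = math.radians(30.0)
+   # Rotation about +Y (pure pitch, no yaw or roll).
+   R = torch.tensor([
+       [math.cos(pitch), 0.0, math.sin(pitch)],
+       [0.0, 1.0, 0.0],
+       [-math.sin(pitch), 0.0, math.cos(pitch)],
+   ])
+   down = torch.tensor([0.0, 0.0, -1.0])
+
+   print(R @ down)  # "base": ray tilts with the body -> [-0.5, 0.0, -0.87]
+   print(down)      # "yaw" and "world": ray stays vertical for a pure pitch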
+
+Geom Group Filtering
+--------------------
+
+MuJoCo geoms can be assigned to groups 0-5. Use ``include_geom_groups`` to
+restrict which geoms rays can hit.
+
+.. code-block:: python
+
+   RayCastSensorCfg(
+       name="terrain_only",
+       frame=ObjRef(type="body", name="base", entity="robot"),
+       pattern=GridPatternCfg(),
+       include_geom_groups=(0, 1),
+   )
+
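+Groups are assigned on the geom itself, via ``group="..."`` in MJCF or the
+``group`` field in ``MjSpec``. A minimal sketch (assuming a ``body`` built
+with ``MjSpec`` as in the demo script):
+
+.. code-block:: python
+
+   # Visual-only geom in group 3: include_geom_groups=(0, 1) will skip it.
+   geom = body.add_geom()
+   geom.name = "visual_shell"
+   geom.type = mujoco.mjtGeom.mjGEOM_SPHERE
+   geom.size[0] = 0.2
+   geom.group = 3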
+
+Output Data
+-----------
+
+The sensor returns ``RayCastData``:
+
+.. code-block:: python
+
+   @dataclass
+   class RayCastData:
+       distances: Tensor   # [B, N] distance to hit, -1 if miss
+       hit_pos_w: Tensor   # [B, N, 3] world-space hit positions
+       normals_w: Tensor   # [B, N, 3] surface normals
+       pos_w: Tensor       # [B, 3] sensor frame position
+       quat_w: Tensor      # [B, 4] sensor frame orientation (w, x, y, z)
+
+``B`` is the number of environments and ``N`` is the number of rays.
+
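+For a downward-facing grid with ``ray_alignment="yaw"``, distances convert
+directly to terrain heights below the sensor. A sketch (``env`` and the
+sensor name follow the Quick Start above):
+
+.. code-block:: python
+
+   data = env.scene["terrain_scan"].data
+
+   hit = data.distances >= 0  # Mask out misses (-1).
+   # Terrain height under each ray: sensor z minus measured distance.
+   heights = data.pos_w[:, 2:3] - data.distances
+   heights[~hit] = float("nan")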
+
+Debug Visualization
+-------------------
+
+Enable visualization with ``debug_vis=True``:
+
+.. code-block:: python
+
+   RayCastSensorCfg(
+       name="scan",
+       frame=ObjRef(type="body", name="base", entity="robot"),
+       pattern=GridPatternCfg(),
+       debug_vis=True,
+   )
+
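+Colors, hit-point spheres, and surface-normal arrows can be customized
+through the nested ``VizCfg``:
+
+.. code-block:: python
+
+   RayCastSensorCfg(
+       name="scan",
+       frame=ObjRef(type="body", name="base", entity="robot"),
+       pattern=GridPatternCfg(),
+       debug_vis=True,
+       viz=RayCastSensorCfg.VizCfg(
+           hit_color=(0.0, 1.0, 0.0, 0.8),   # Green for hits
+           miss_color=(1.0, 0.0, 0.0, 0.4),  # Red for misses
+           show_rays=True,                   # Draw ray arrows
+           show_normals=True,                # Draw surface normals
+       ),
+   )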
+
+Examples
+--------
+
+Height Map for Locomotion
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   # Dense grid for terrain-aware locomotion.
+   height_scan = RayCastSensorCfg(
+       name="height_scan",
+       frame=ObjRef(type="body", name="base", entity="robot"),
+       pattern=GridPatternCfg(
+           size=(1.6, 1.0),
+           resolution=0.1,
+           direction=(0.0, 0.0, -1.0),
+       ),
+       ray_alignment="yaw",  # Stay level on slopes
+       max_distance=2.0,
+       exclude_parent_body=True,
+   )
+
+   # In observation function.
+   def height_obs(env: ManagerBasedRlEnv) -> torch.Tensor:
+       sensor = env.scene["height_scan"]
+       return sensor.data.distances  # [B, N]
+
+
+Depth Camera Simulation
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   # Simulate a depth camera.
+   depth_cam = RayCastSensorCfg(
+       name="depth",
+       frame=ObjRef(type="site", name="camera_site", entity="robot"),
+       pattern=PinholeCameraPatternCfg.from_mujoco_camera("robot/depth_cam"),
+       max_distance=10.0,
+   )
+
+   # Reshape to image.
+   def depth_image(env: ManagerBasedRlEnv) -> torch.Tensor:
+       sensor = env.scene["depth"]
+       distances = sensor.data.distances  # [B, W*H]
+       return distances.view(-1, 12, 16)  # [B, H, W]
+
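+The hard-coded ``(12, 16)`` above assumes the referenced camera has a 16x12
+resolution. With an explicit pattern config the image shape can be read from
+the config instead (a sketch):
+
+.. code-block:: python
+
+   pattern = PinholeCameraPatternCfg(width=64, height=48, fovy=58.0)
+   # ... build the sensor with this pattern, then:
+   depth = sensor.data.distances.view(-1, pattern.height, pattern.width)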
+
+Obstacle Detection
+^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+   # Forward-facing obstacle scan.
+   obstacle_scan = RayCastSensorCfg(
+       name="obstacle",
+       frame=ObjRef(type="body", name="head", entity="robot"),
+       pattern=GridPatternCfg(
+           size=(0.5, 0.3),
+           resolution=0.1,
+           direction=(-1.0, 0.0, 0.0),  # Forward
+       ),
+       max_distance=3.0,
+       include_geom_groups=(0,),  # Only hit geoms in group 0
+   )
+
+
+Running the Demo
+----------------
+
+A demo script is included to visualize the sensor on varied terrain:
+
+.. code-block:: bash
+
+ # Grid pattern (default)
+ uv run mjpython scripts/demos/raycast_sensor.py --pattern grid
+
+ # Pinhole camera pattern
+ uv run mjpython scripts/demos/raycast_sensor.py --pattern pinhole
+
+ # With yaw alignment
+ uv run mjpython scripts/demos/raycast_sensor.py --alignment yaw
+
+ # Viser viewer (for remote/headless)
+ uv run python scripts/demos/raycast_sensor.py --viewer viser
diff --git a/docs/source/sensors.rst b/docs/source/sensors.rst
index efd0af878..cffd0001a 100644
--- a/docs/source/sensors.rst
+++ b/docs/source/sensors.rst
@@ -63,7 +63,7 @@ Quick Note: Entity Data vs Sensors
Sensor Types
------------
-mjlab currently provides two sensor implementations:
+mjlab provides three sensor implementations:
BuiltinSensor
^^^^^^^^^^^^^
@@ -75,6 +75,12 @@ ContactSensor
Detects contacts between bodies, geoms, or subtrees. Returns structured
`ContactData` with forces, positions, air time metrics, etc.
+RayCastSensor
+^^^^^^^^^^^^^
+GPU-accelerated raycasting for terrain scanning and depth sensing. Supports
+grid and pinhole camera patterns with configurable alignment modes.
+See :ref:`raycast_sensor` for full documentation.
+
BuiltinSensor
-------------
diff --git a/pyproject.toml b/pyproject.toml
index ac5790f4a..b5c303a73 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -117,7 +117,7 @@ url = "https://py.mujoco.org"
explicit = true
[tool.uv.sources]
-mujoco-warp = { git = "https://github.com/google-deepmind/mujoco_warp", rev = "55ab24595ea4939ff583f1910e1615361d49d9ad"}
+mujoco-warp = { git = "https://github.com/google-deepmind/mujoco_warp", rev = "1c6b31692eaa5d3134093958d839165245d196a6"}
warp-lang = { index = "nvidia", marker = "sys_platform != 'darwin'" }
mujoco = { index = "mujoco" }
torch = { index = "pytorch-cu128", extra = "cu128", marker = "sys_platform != 'darwin'" }
diff --git a/scripts/demos/raycast_sensor.py b/scripts/demos/raycast_sensor.py
new file mode 100644
index 000000000..26b74ef14
--- /dev/null
+++ b/scripts/demos/raycast_sensor.py
@@ -0,0 +1,258 @@
+"""Raycast sensor demo.
+
+Run with:
+ uv run mjpython scripts/demos/raycast_sensor.py [--viewer native|viser] # macOS
+ uv run python scripts/demos/raycast_sensor.py [--viewer native|viser] # Linux
+
+Examples:
+ # Grid pattern (default)
+ uv run python scripts/demos/raycast_sensor.py --pattern grid
+
+ # Pinhole camera pattern
+ uv run python scripts/demos/raycast_sensor.py --pattern pinhole
+
+ # With yaw alignment (ignores pitch/roll)
+ uv run python scripts/demos/raycast_sensor.py --alignment yaw
+
+If using the native viewer, you can launch in interactive mode with:
+ uv run mjpython scripts/demos/raycast_sensor.py --viewer native --interactive
+"""
+
+from __future__ import annotations
+
+import os
+from typing import Literal
+
+import mujoco
+import numpy as np
+import torch
+import tyro
+
+import mjlab.terrains as terrain_gen
+from mjlab.entity import EntityCfg
+from mjlab.envs import ManagerBasedRlEnv, ManagerBasedRlEnvCfg
+from mjlab.rl import RslRlVecEnvWrapper
+from mjlab.scene import SceneCfg
+from mjlab.sensor import (
+ GridPatternCfg,
+ ObjRef,
+ PinholeCameraPatternCfg,
+ RayCastSensorCfg,
+)
+from mjlab.terrains.terrain_generator import TerrainGeneratorCfg
+from mjlab.terrains.terrain_importer import TerrainImporterCfg
+from mjlab.utils.torch import configure_torch_backends
+from mjlab.viewer import NativeMujocoViewer, ViserPlayViewer
+
+
+def create_scanner_spec() -> mujoco.MjSpec:
+ spec = mujoco.MjSpec()
+ spec.modelname = "scanner"
+
+ mat = spec.add_material()
+ mat.name = "scanner_mat"
+ mat.rgba[:] = (1.0, 0.5, 0.0, 0.9)
+
+ scanner = spec.worldbody.add_body(mocap=True)
+ scanner.name = "scanner"
+ scanner.pos[:] = (0, 0, 2.0)
+
+ geom = scanner.add_geom()
+ geom.name = "scanner_geom"
+ geom.type = mujoco.mjtGeom.mjGEOM_BOX
+ geom.size[:] = (0.15, 0.15, 0.05)
+ geom.mass = 1.0
+ geom.material = "scanner_mat"
+
+ scanner.add_camera(name="scanner", fovy=58.0, resolution=(16, 12))
+
+ record_cam = scanner.add_camera(name="record_cam")
+ record_cam.pos[:] = (2, 0, 2)
+ record_cam.fovy = 40.0
+ record_cam.mode = mujoco.mjtCamLight.mjCAMLIGHT_TARGETBODY
+ record_cam.targetbody = "scanner"
+
+ return spec
+
+
+def create_env_cfg(
+ pattern: Literal["grid", "pinhole"],
+ alignment: Literal["base", "yaw", "world"],
+) -> ManagerBasedRlEnvCfg:
+ custom_terrain_cfg = TerrainGeneratorCfg(
+ size=(4.0, 4.0),
+ border_width=0.5,
+ num_rows=1,
+ num_cols=4,
+ curriculum=True,
+ sub_terrains={
+ "pyramid_stairs_inv": terrain_gen.BoxInvertedPyramidStairsTerrainCfg(
+ proportion=0.25,
+ step_height_range=(0.1, 0.25),
+ step_width=0.3,
+ platform_width=1.5,
+ border_width=0.25,
+ ),
+ "hf_pyramid_slope_inv": terrain_gen.HfPyramidSlopedTerrainCfg(
+ proportion=0.25,
+ slope_range=(0.6, 1.5),
+ platform_width=1.5,
+ border_width=0.25,
+ inverted=True,
+ ),
+ "random_rough": terrain_gen.HfRandomUniformTerrainCfg(
+ proportion=0.25,
+ noise_range=(0.05, 0.15),
+ noise_step=0.02,
+ border_width=0.25,
+ ),
+ "wave_terrain": terrain_gen.HfWaveTerrainCfg(
+ proportion=0.25,
+ amplitude_range=(0.15, 0.25),
+ num_waves=3,
+ border_width=0.25,
+ ),
+ },
+ add_lights=False,
+ )
+
+ terrain_cfg = TerrainImporterCfg(
+ terrain_type="generator",
+ terrain_generator=custom_terrain_cfg,
+ num_envs=1,
+ )
+
+ scanner_entity_cfg = EntityCfg(
+ spec_fn=create_scanner_spec,
+ init_state=EntityCfg.InitialStateCfg(pos=(0.65, -0.4, 0.5)),
+ )
+
+ if pattern == "grid":
+ pattern_cfg = GridPatternCfg(
+ size=(0.6, 0.6),
+ resolution=0.1,
+ direction=(0.0, 0.0, -1.0),
+ )
+ else:
+ assert pattern == "pinhole"
+ pattern_cfg = PinholeCameraPatternCfg.from_mujoco_camera("scanner/scanner")
+
+ raycast_cfg = RayCastSensorCfg(
+ name="terrain_scan",
+ frame=ObjRef(type="body", name="scanner", entity="scanner"),
+ pattern=pattern_cfg,
+ ray_alignment=alignment,
+ max_distance=5.0,
+ exclude_parent_body=True,
+ debug_vis=True,
+ viz=RayCastSensorCfg.VizCfg(
+ hit_color=(0.0, 1.0, 0.0, 0.9),
+ miss_color=(1.0, 0.0, 0.0, 0.5),
+ show_rays=False,
+ show_normals=True,
+ ),
+ )
+
+ cfg = ManagerBasedRlEnvCfg(
+ decimation=10,
+ episode_length_s=1e9,
+ scene=SceneCfg(
+ num_envs=1,
+ env_spacing=0.0,
+ extent=2.0,
+ terrain=terrain_cfg,
+ entities={
+ "scanner": scanner_entity_cfg,
+ },
+ sensors=(raycast_cfg,),
+ ),
+ observations={},
+ actions={},
+ rewards={},
+ terminations={},
+ )
+
+ cfg.viewer.body_name = "scanner"
+ cfg.viewer.distance = 12.0
+ cfg.viewer.elevation = -25.0
+ cfg.viewer.azimuth = 135.0
+
+ return cfg
+
+
+def main(
+ viewer: str = "auto",
+ interactive: bool = False,
+ pattern: Literal["grid", "pinhole"] = "grid",
+ alignment: Literal["base", "yaw", "world"] = "base",
+) -> None:
+ configure_torch_backends()
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+ print("=" * 60)
+ print("Raycast Sensor Demo - 4 Terrain Types")
+ print(f" Pattern: {pattern}")
+ print(f" Alignment: {alignment}")
+ print("=" * 60)
+ print()
+
+ env_cfg = create_env_cfg(pattern, alignment)
+ env = ManagerBasedRlEnv(cfg=env_cfg, device=device)
+ env = RslRlVecEnvWrapper(env)
+
+ if viewer == "auto":
+ has_display = bool(os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY"))
+ resolved_viewer = "native" if has_display else "viser"
+ else:
+ resolved_viewer = viewer
+
+ use_auto_scan = (resolved_viewer == "viser") or (not interactive)
+
+ if use_auto_scan:
+
+ class AutoScanPolicy:
+ def __init__(self):
+ self.step_count = 0
+
+ def __call__(self, obs) -> torch.Tensor:
+ del obs
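+ # Sweep the scanner across the terrain: y advances linearly and wraps
+ # every y_period steps while x oscillates sinusoidally.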
+ t = self.step_count * 0.005
+ y_period = 1000
+ y_normalized = (self.step_count % y_period) / y_period
+ y = -8.0 + 16.0 * y_normalized
+ x = 1.5 * np.sin(2 * np.pi * t * 0.3)
+ z = 1.0
+ env.unwrapped.sim.data.mocap_pos[0, 0, :] = torch.tensor(
+ [x, y, z], device=device, dtype=torch.float32
+ )
+ env.unwrapped.sim.data.mocap_quat[0, 0, :] = torch.tensor(
+ [1, 0, 0, 0], device=device, dtype=torch.float32
+ )
+ self.step_count += 1
+ return torch.zeros(env.unwrapped.action_space.shape, device=device)
+
+ policy = AutoScanPolicy()
+ else:
+
+ class PolicyZero:
+ def __call__(self, obs) -> torch.Tensor:
+ del obs
+ return torch.zeros(env.unwrapped.action_space.shape, device=device)
+
+ policy = PolicyZero()
+
+ if resolved_viewer == "native":
+ print("Launching native viewer...")
+ NativeMujocoViewer(env, policy).run()
+ elif resolved_viewer == "viser":
+ print("Launching viser viewer...")
+ ViserPlayViewer(env, policy).run()
+ else:
+ raise ValueError(f"Unknown viewer: {viewer}")
+
+ env.close()
+
+
+if __name__ == "__main__":
+ tyro.cli(main)
diff --git a/scripts/fix_mjpython_macos.sh b/scripts/fix_mjpython_macos.sh
new file mode 100755
index 000000000..51ef89a8e
--- /dev/null
+++ b/scripts/fix_mjpython_macos.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Fix mjpython on macOS by creating a symlink to libpython.
+# This is needed because mjpython expects libpython in .venv/lib/
+
+set -e
+
+VENV_DIR=".venv"
+
+if [[ "$(uname)" != "Darwin" ]]; then
+ echo "This script is only needed on macOS."
+ exit 0
+fi
+
+if [[ ! -d "$VENV_DIR" ]]; then
+ echo "Error: .venv directory not found. Run 'uv sync' first."
+ exit 1
+fi
+
+# Get Python version and prefix from the venv.
+PYTHON_VERSION=$("$VENV_DIR/bin/python" -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')")
+PYTHON_PREFIX=$("$VENV_DIR/bin/python" -c "import sys; print(sys.base_prefix)")
+DYLIB_NAME="libpython${PYTHON_VERSION}.dylib"
+
+# Find the dylib in the Python installation.
+DYLIB_PATH="$PYTHON_PREFIX/lib/$DYLIB_NAME"
+
+if [[ ! -f "$DYLIB_PATH" ]]; then
+ echo "Error: Could not find $DYLIB_PATH"
+ exit 1
+fi
+
+# Create the symlink.
+mkdir -p "$VENV_DIR/lib"
+ln -sf "$DYLIB_PATH" "$VENV_DIR/lib/$DYLIB_NAME"
+
+echo "Created symlink: $VENV_DIR/lib/$DYLIB_NAME -> $DYLIB_PATH"
diff --git a/src/mjlab/envs/manager_based_rl_env.py b/src/mjlab/envs/manager_based_rl_env.py
index 7f7eea09a..67af64aa4 100644
--- a/src/mjlab/envs/manager_based_rl_env.py
+++ b/src/mjlab/envs/manager_based_rl_env.py
@@ -353,6 +353,8 @@ def seed(seed: int = -1) -> int:
def update_visualizers(self, visualizer: DebugVisualizer) -> None:
for mod in self.manager_visualizers.values():
mod.debug_vis(visualizer)
+ for sensor in self.scene.sensors.values():
+ sensor.debug_vis(visualizer)
# Private methods.
diff --git a/src/mjlab/sensor/__init__.py b/src/mjlab/sensor/__init__.py
index 069fc97ca..895b19921 100644
--- a/src/mjlab/sensor/__init__.py
+++ b/src/mjlab/sensor/__init__.py
@@ -5,5 +5,12 @@
from mjlab.sensor.contact_sensor import ContactMatch as ContactMatch
from mjlab.sensor.contact_sensor import ContactSensor as ContactSensor
from mjlab.sensor.contact_sensor import ContactSensorCfg as ContactSensorCfg
+from mjlab.sensor.raycast_sensor import GridPatternCfg as GridPatternCfg
+from mjlab.sensor.raycast_sensor import (
+ PinholeCameraPatternCfg as PinholeCameraPatternCfg,
+)
+from mjlab.sensor.raycast_sensor import RayCastData as RayCastData
+from mjlab.sensor.raycast_sensor import RayCastSensor as RayCastSensor
+from mjlab.sensor.raycast_sensor import RayCastSensorCfg as RayCastSensorCfg
from mjlab.sensor.sensor import Sensor as Sensor
from mjlab.sensor.sensor import SensorCfg as SensorCfg
diff --git a/src/mjlab/sensor/raycast_sensor.py b/src/mjlab/sensor/raycast_sensor.py
new file mode 100644
index 000000000..8b6a835a8
--- /dev/null
+++ b/src/mjlab/sensor/raycast_sensor.py
@@ -0,0 +1,804 @@
+"""Raycast sensor for terrain and obstacle detection.
+
+Ray Patterns
+------------
+
+This module provides two ray pattern types for different use cases:
+
+**Grid Pattern** - Parallel rays in a 2D grid::
+
+   Camera at any height:
+    ↓   ↓   ↓   ↓   ↓     ← All rays point same direction
+    ↓   ↓   ↓   ↓   ↓
+    ↓   ↓   ↓   ↓   ↓
+  ──●───●───●───●───●──   ← Fixed spacing (e.g., 10cm apart)
+         Ground
+
+- Rays are parallel (all point in the same direction, e.g., -Z down)
+- Spacing is defined in world units (meters)
+- Height doesn't affect the hit pattern: same footprint regardless of altitude
+- Good for: height maps, terrain scanning with consistent spatial sampling
+
+**Pinhole Camera Pattern** - Diverging rays from a single point::
+
+   Camera LOW:              Camera HIGH:
+       📷                        📷
+      /|\\                     / | \\
+     / | \\                   /  |  \\
+    /  |  \\                 /   |   \\
+   ─●───●───●─           ───●─────●─────●───
+   (small footprint)       (large footprint)
+
+- Rays diverge from a single point (like light entering a camera)
+- FOV is fixed in angular units (degrees)
+- Higher altitude → wider ground coverage, more spread between hits
+- Lower altitude → tighter ground coverage, denser hits
+- Good for: simulating depth cameras, LiDAR with angular resolution
+
+**Pattern Comparison**:
+
+=============== ==================== ==========================
+Aspect          Grid                 Pinhole
+=============== ==================== ==========================
+Ray direction   All parallel         Diverge from origin
+Spacing         Meters               Degrees (FOV)
+Height affects  No                   Yes
+Real-world      Orthographic proj.   Perspective camera / LiDAR
+=============== ==================== ==========================
+
+The pinhole behavior matches real depth sensors (RealSense, LiDAR): the
+farther you are from an object, the more area each pixel covers.
+
+
+Frame Attachment
+----------------
+
+Rays are attached to a frame in the scene via ``ObjRef``. Supported frame types:
+
+- **body**: Attach to a body's origin. Rays follow body position and orientation.
+- **site**: Attach to a site. Useful for precise placement or offset from body.
+- **geom**: Attach to a geometry. Useful for sensors mounted on specific parts.
+
+Example::
+
+    from mjlab.sensor import ObjRef, RayCastSensorCfg, GridPatternCfg
+
+    cfg = RayCastSensorCfg(
+        name="terrain_scan",
+        frame=ObjRef(type="body", name="base", entity="robot"),
+        pattern=GridPatternCfg(size=(1.0, 1.0), resolution=0.1),
+    )
+
+The ``exclude_parent_body`` option (default: True) prevents rays from hitting
+the body they're attached to.
+
+
+Ray Alignment
+-------------
+
+The ``ray_alignment`` setting controls how rays orient relative to the frame::
+
+ Robot tilted 30°:
+
+ "base" (default) "yaw" "world"
+ Rays tilt with body Rays stay level Rays fixed to world
+ ↘ ↓ ↙ ↓ ↓ ↓ ↓ ↓ ↓
+ \\|/ ||| |||
+ 🤖 ← tilted 🤖 ← tilted 🤖 ← tilted
+ / / /
+
+- **base**: Full position + rotation. Rays rotate with the body. Good for
+ body-mounted sensors that should scan relative to the robot's orientation.
+
+- **yaw**: Position + yaw only, ignores pitch/roll. Rays always point straight
+ down regardless of body tilt. Good for height maps where you want consistent
+ vertical sampling even when the robot is on a slope.
+
+- **world**: Fixed in world frame, only position follows body. Rays always
+ point in a fixed world direction. Good for gravity-aligned measurements.
+
+
+Debug Visualization
+-------------------
+
+Enable visualization with ``debug_vis=True`` and customize via ``VizCfg``::
+
+    cfg = RayCastSensorCfg(
+        name="scan",
+        frame=ObjRef(type="body", name="base", entity="robot"),
+        pattern=GridPatternCfg(),
+        debug_vis=True,
+        viz=RayCastSensorCfg.VizCfg(
+            hit_color=(0, 1, 0, 0.8),    # Green for hits
+            miss_color=(1, 0, 0, 0.4),   # Red for misses
+            show_rays=True,              # Draw ray arrows
+            show_normals=True,           # Draw surface normals
+            normal_color=(1, 1, 0, 1),   # Yellow normals
+        ),
+    )
+
+Visualization options:
+
+- ``hit_color`` / ``miss_color``: RGBA colors for ray arrows
+- ``hit_sphere_color`` / ``hit_sphere_radius``: Spheres at hit points
+- ``show_rays``: Draw arrows from origin to hit/miss points
+- ``show_normals`` / ``normal_color`` / ``normal_length``: Surface normal arrows
+
+
+Geom Group Filtering
+--------------------
+
+MuJoCo geoms can be assigned to groups 0-5. Use ``include_geom_groups`` to
+filter which groups the rays can hit::
+
+    cfg = RayCastSensorCfg(
+        name="terrain_only",
+        frame=ObjRef(type="body", name="base", entity="robot"),
+        pattern=GridPatternCfg(),
+        include_geom_groups=(0, 1),  # Only hit geoms in groups 0 and 1
+    )
+
+This is useful for ignoring certain geometry (e.g., visual-only geoms in
+group 3) while still detecting collisions with terrain (group 0).
+
+
+Output Data
+-----------
+
+Access sensor data via the ``data`` property, which returns ``RayCastData``:
+
+- ``distances``: [B, N] Distance to hit, or -1 if no hit / beyond max_distance
+- ``hit_pos_w``: [B, N, 3] World-space hit positions
+- ``normals_w``: [B, N, 3] Surface normals at hit points (world frame)
+- ``pos_w``: [B, 3] Sensor frame position
+- ``quat_w``: [B, 4] Sensor frame orientation (w, x, y, z)
+
+Where B = number of environments, N = number of rays.
+"""
+
+from __future__ import annotations
+
+import math
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Literal
+
+import mujoco
+import mujoco_warp as mjwarp
+import torch
+import warp as wp
+from mujoco_warp._src.ray import rays
+from mujoco_warp._src.types import vec6
+
+from mjlab.entity import Entity
+from mjlab.sensor.builtin_sensor import ObjRef
+from mjlab.sensor.sensor import Sensor, SensorCfg
+from mjlab.utils.lab_api.math import quat_from_matrix
+
+if TYPE_CHECKING:
+ from mjlab.viewer.debug_visualizer import DebugVisualizer
+
+
+# Type aliases for configuration choices.
+RayAlignment = Literal["base", "yaw", "world"]
+
+
+@dataclass
+class GridPatternCfg:
+ """Grid pattern - parallel rays in a 2D grid."""
+
+ size: tuple[float, float] = (1.0, 1.0)
+ """Grid size (length, width) in meters."""
+
+ resolution: float = 0.1
+ """Spacing between rays in meters."""
+
+ direction: tuple[float, float, float] = (0.0, 0.0, -1.0)
+ """Ray direction in frame-local coordinates."""
+
+ def generate_rays(
+ self, mj_model: mujoco.MjModel | None, device: str
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ """Generate ray pattern.
+
+ Args:
+ mj_model: MuJoCo model (unused for grid pattern).
+ device: Device for tensor operations.
+
+ Returns:
+ Tuple of (local_offsets [N, 3], local_directions [N, 3]).
+ """
+ del mj_model # Unused for grid pattern
+ size_x, size_y = self.size
+ res = self.resolution
+
+ x = torch.arange(
+ -size_x / 2, size_x / 2 + res * 0.5, res, device=device, dtype=torch.float32
+ )
+ y = torch.arange(
+ -size_y / 2, size_y / 2 + res * 0.5, res, device=device, dtype=torch.float32
+ )
+ grid_x, grid_y = torch.meshgrid(x, y, indexing="xy")
+
+ num_rays = grid_x.numel()
+ local_offsets = torch.zeros((num_rays, 3), device=device, dtype=torch.float32)
+ local_offsets[:, 0] = grid_x.flatten()
+ local_offsets[:, 1] = grid_y.flatten()
+
+ # All rays share the same direction for grid pattern.
+ direction = torch.tensor(self.direction, device=device, dtype=torch.float32)
+ direction = direction / direction.norm()
+ local_directions = direction.unsqueeze(0).expand(num_rays, 3).clone()
+
+ return local_offsets, local_directions
+
+
+@dataclass
+class PinholeCameraPatternCfg:
+ """Pinhole camera pattern - rays diverging from origin like a camera.
+
+ Can be configured with explicit parameters (width, height, fovy) or created
+ via factory methods like from_mujoco_camera() or from_intrinsic_matrix().
+ """
+
+ width: int = 16
+ """Image width in pixels."""
+
+ height: int = 12
+ """Image height in pixels."""
+
+ fovy: float = 45.0
+ """Vertical field of view in degrees (matches MuJoCo convention)."""
+
+ _camera_name: str | None = field(default=None, repr=False)
+ """Internal: MuJoCo camera name for deferred parameter resolution."""
+
+ @classmethod
+ def from_mujoco_camera(cls, camera_name: str) -> PinholeCameraPatternCfg:
+ """Create config that references a MuJoCo camera.
+
+ Camera parameters (resolution, FOV) are resolved at runtime from the model.
+
+ Args:
+ camera_name: Name of the MuJoCo camera to reference.
+
+ Returns:
+ Config that will resolve parameters from the MuJoCo camera.
+ """
+ # Placeholder values; actual values resolved in generate_rays().
+ return cls(width=0, height=0, fovy=0.0, _camera_name=camera_name)
+
+ @classmethod
+ def from_intrinsic_matrix(
+ cls, intrinsic_matrix: list[float], width: int, height: int
+ ) -> PinholeCameraPatternCfg:
+ """Create from 3x3 intrinsic matrix [fx, 0, cx, 0, fy, cy, 0, 0, 1].
+
+ Args:
+ intrinsic_matrix: Flattened 3x3 intrinsic matrix.
+ width: Image width in pixels.
+ height: Image height in pixels.
+
+ Returns:
+ Config with fovy computed from the intrinsic matrix.
+ """
+ fy = intrinsic_matrix[4] # fy is at position [1,1] in the matrix
+ fovy = 2 * math.atan(height / (2 * fy)) * 180 / math.pi
+ return cls(width=width, height=height, fovy=fovy)
+
+ def generate_rays(
+ self, mj_model: mujoco.MjModel | None, device: str
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ """Generate ray pattern.
+
+ Args:
+ mj_model: MuJoCo model (required if using from_mujoco_camera).
+ device: Device for tensor operations.
+
+ Returns:
+ Tuple of (local_offsets [N, 3], local_directions [N, 3]).
+ """
+ # Resolve camera parameters.
+ if self._camera_name is not None:
+ if mj_model is None:
+ raise ValueError("MuJoCo model required when using from_mujoco_camera()")
+ # Get parameters from MuJoCo camera.
+ cam_id = mj_model.camera(self._camera_name).id
+ width, height = mj_model.cam_resolution[cam_id]
+
+ # MuJoCo has two camera modes:
+ # 1. fovy mode: sensorsize is zero, use cam_fovy directly
+ # 2. Physical sensor mode: sensorsize > 0, compute from focal/sensorsize
+ sensorsize = mj_model.cam_sensorsize[cam_id]
+ if sensorsize[0] > 0 and sensorsize[1] > 0:
+ # Physical sensor model.
+ intrinsic = mj_model.cam_intrinsic[cam_id] # [fx, fy, cx, cy]
+ focal = intrinsic[:2] # [fx, fy]
+ h_fov_rad = 2 * math.atan(sensorsize[0] / (2 * focal[0]))
+ v_fov_rad = 2 * math.atan(sensorsize[1] / (2 * focal[1]))
+ else:
+ # Read vertical FOV directly from MuJoCo.
+ v_fov_rad = math.radians(mj_model.cam_fovy[cam_id])
+ aspect = width / height
+ h_fov_rad = 2 * math.atan(math.tan(v_fov_rad / 2) * aspect)
+ else:
+ # Use explicit parameters.
+ width = self.width
+ height = self.height
+ v_fov_rad = math.radians(self.fovy)
+ aspect = width / height
+ h_fov_rad = 2 * math.atan(math.tan(v_fov_rad / 2) * aspect)
+
+ # Create normalized pixel coordinates [-1, 1].
+ u = torch.linspace(-1, 1, width, device=device, dtype=torch.float32)
+ v = torch.linspace(-1, 1, height, device=device, dtype=torch.float32)
+ grid_u, grid_v = torch.meshgrid(u, v, indexing="xy")
+
+ # Convert to ray directions (MuJoCo camera: -Z forward, +X right, +Y down).
+ ray_x = grid_u.flatten() * math.tan(h_fov_rad / 2)
+ ray_y = grid_v.flatten() * math.tan(v_fov_rad / 2)
+ ray_z = -torch.ones_like(ray_x) # Negative Z for MuJoCo camera forward
+
+ num_rays = width * height
+ local_offsets = torch.zeros((num_rays, 3), device=device)
+ local_directions = torch.stack([ray_x, ray_y, ray_z], dim=1)
+ local_directions = local_directions / local_directions.norm(dim=1, keepdim=True)
+
+ return local_offsets, local_directions
+
+
+PatternCfg = GridPatternCfg | PinholeCameraPatternCfg
+
+
+@dataclass
+class RayCastData:
+ """Raycast sensor output data."""
+
+ distances: torch.Tensor
+ """[B, N] Distance to hit point. -1 if no hit."""
+
+ normals_w: torch.Tensor
+ """[B, N, 3] Surface normal at hit point (world frame). Zero if no hit."""
+
+ hit_pos_w: torch.Tensor
+ """[B, N, 3] Hit position in world frame. Zero if no hit."""
+
+ pos_w: torch.Tensor
+ """[B, 3] Frame position in world coordinates."""
+
+ quat_w: torch.Tensor
+ """[B, 4] Frame orientation quaternion (w, x, y, z) in world coordinates."""
+
+
+@dataclass
+class RayCastSensorCfg(SensorCfg):
+ """Raycast sensor configuration.
+
+ Supports multiple ray patterns (grid, pinhole camera) and alignment modes.
+ """
+
+ @dataclass
+ class VizCfg:
+ """Visualization settings for debug rendering."""
+
+ hit_color: tuple[float, float, float, float] = (0.0, 1.0, 0.0, 0.8)
+ """RGBA color for rays that hit a surface."""
+
+ miss_color: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.4)
+ """RGBA color for rays that miss."""
+
+ hit_sphere_color: tuple[float, float, float, float] = (0.0, 1.0, 1.0, 1.0)
+ """RGBA color for spheres drawn at hit points."""
+
+ hit_sphere_radius: float = 0.5
+ """Radius of spheres drawn at hit points (multiplier of meansize)."""
+
+ show_rays: bool = False
+ """Whether to draw ray arrows."""
+
+ show_normals: bool = False
+ """Whether to draw surface normals at hit points."""
+
+ normal_color: tuple[float, float, float, float] = (1.0, 1.0, 0.0, 1.0)
+ """RGBA color for surface normal arrows."""
+
+ normal_length: float = 5.0
+ """Length of surface normal arrows (multiplier of meansize)."""
+
+ frame: ObjRef
+ """Body or site to attach rays to."""
+
+ pattern: PatternCfg = field(default_factory=GridPatternCfg)
+ """Ray pattern configuration. Defaults to GridPatternCfg."""
+
+ ray_alignment: RayAlignment = "base"
+ """How rays align with the frame.
+
+ - "base": Full position + rotation (default).
+ - "yaw": Position + yaw only, ignores pitch/roll (good for height maps).
+ - "world": Fixed in world frame, position only follows body.
+ """
+
+ max_distance: float = 10.0
+ """Maximum ray distance. Rays beyond this report -1."""
+
+ exclude_parent_body: bool = True
+ """Exclude parent body from ray intersection tests."""
+
+ include_geom_groups: tuple[int, ...] | None = None
+ """Geom groups (0-5) to include in raycasting. None means all groups."""
+
+ debug_vis: bool = False
+ """Enable debug visualization."""
+
+ viz: VizCfg = field(default_factory=VizCfg)
+ """Visualization settings."""
+
+ def build(self) -> RayCastSensor:
+ return RayCastSensor(self)
+
+
+class RayCastSensor(Sensor[RayCastData]):
+ """Raycast sensor for terrain and obstacle detection."""
+
+ def __init__(self, cfg: RayCastSensorCfg) -> None:
+ self.cfg = cfg
+ self._data: mjwarp.Data | None = None
+ self._model: mjwarp.Model | None = None
+ self._mj_model: mujoco.MjModel | None = None
+ self._device: str | None = None
+ self._wp_device: wp.context.Device | None = None
+
+ self._frame_body_id: int | None = None
+ self._frame_site_id: int | None = None
+ self._frame_geom_id: int | None = None
+ self._frame_type: Literal["body", "site", "geom"] = "body"
+
+ self._local_offsets: torch.Tensor | None = None
+ self._local_directions: torch.Tensor | None = None # [N, 3] per-ray directions
+ self._num_rays: int = 0
+
+ self._ray_pnt: wp.array | None = None
+ self._ray_vec: wp.array | None = None
+ self._ray_dist: wp.array | None = None
+ self._ray_geomid: wp.array | None = None
+ self._ray_normal: wp.array | None = None
+ self._ray_bodyexclude: wp.array | None = None
+ self._geomgroup: vec6 = vec6(-1, -1, -1, -1, -1, -1)
+
+ self._distances: torch.Tensor | None = None
+ self._normals_w: torch.Tensor | None = None
+ self._hit_pos_w: torch.Tensor | None = None
+ self._pos_w: torch.Tensor | None = None
+ self._quat_w: torch.Tensor | None = None
+
+ self._raycast_graph: wp.Graph | None = None
+ self._use_cuda_graph: bool = False
+
+ def edit_spec(
+ self,
+ scene_spec: mujoco.MjSpec,
+ entities: dict[str, Entity],
+ ) -> None:
+ del scene_spec, entities
+
+ def initialize(
+ self,
+ mj_model: mujoco.MjModel,
+ model: mjwarp.Model,
+ data: mjwarp.Data,
+ device: str,
+ ) -> None:
+ self._data = data
+ self._model = model
+ self._mj_model = mj_model
+ self._device = device
+ self._wp_device = wp.get_device(device)
+ num_envs = data.nworld
+
+ frame = self.cfg.frame
+ frame_name = frame.prefixed_name()
+
+ if frame.type == "body":
+ self._frame_body_id = mj_model.body(frame_name).id
+ self._frame_type = "body"
+ elif frame.type == "site":
+ self._frame_site_id = mj_model.site(frame_name).id
+ # Look up parent body for exclusion.
+ self._frame_body_id = int(mj_model.site_bodyid[self._frame_site_id])
+ self._frame_type = "site"
+ elif frame.type == "geom":
+ self._frame_geom_id = mj_model.geom(frame_name).id
+ # Look up parent body for exclusion.
+ self._frame_body_id = int(mj_model.geom_bodyid[self._frame_geom_id])
+ self._frame_type = "geom"
+ else:
+ raise ValueError(
+ f"RayCastSensor frame must be 'body', 'site', or 'geom', got '{frame.type}'"
+ )
+
+ # Generate ray pattern.
+ pattern = self.cfg.pattern
+ self._local_offsets, self._local_directions = pattern.generate_rays(
+ mj_model, device
+ )
+ self._num_rays = self._local_offsets.shape[0]
+
+ self._ray_pnt = wp.zeros((num_envs, self._num_rays), dtype=wp.vec3, device=device)
+ self._ray_vec = wp.zeros((num_envs, self._num_rays), dtype=wp.vec3, device=device)
+ self._ray_dist = wp.zeros((num_envs, self._num_rays), dtype=float, device=device)
+ self._ray_geomid = wp.zeros((num_envs, self._num_rays), dtype=int, device=device)
+ self._ray_normal = wp.zeros(
+ (num_envs, self._num_rays), dtype=wp.vec3, device=device
+ )
+
+ body_exclude = (
+ self._frame_body_id
+ if self.cfg.exclude_parent_body and self._frame_body_id is not None
+ else -1
+ )
+ self._ray_bodyexclude = wp.full(
+ (self._num_rays,),
+ body_exclude,
+ dtype=int, # type: ignore
+ device=device,
+ )
+
+ # Convert include_geom_groups to vec6 format (-1 = include, 0 = exclude).
+ if self.cfg.include_geom_groups is not None:
+ groups = [0, 0, 0, 0, 0, 0]
+ for g in self.cfg.include_geom_groups:
+ if 0 <= g <= 5:
+ groups[g] = -1
+ self._geomgroup = vec6(*groups)
+ else:
+ self._geomgroup = vec6(-1, -1, -1, -1, -1, -1) # All groups
+
+ assert self._wp_device is not None
+ self._use_cuda_graph = self._wp_device.is_cuda and wp.is_mempool_enabled(
+ self._wp_device
+ )
+ if self._use_cuda_graph:
+ self._create_graph()
+
+ def _create_graph(self) -> None:
+ """Capture CUDA graph for raycast operation."""
+ assert self._wp_device is not None and self._wp_device.is_cuda
+ with wp.ScopedDevice(self._wp_device):
+ with wp.ScopedCapture() as capture:
+ rays(
+ m=self._model.struct, # type: ignore[attr-defined]
+ d=self._data.struct, # type: ignore[attr-defined]
+ pnt=self._ray_pnt,
+ vec=self._ray_vec,
+ geomgroup=self._geomgroup,
+ flg_static=True,
+ bodyexclude=self._ray_bodyexclude,
+ dist=self._ray_dist,
+ geomid=self._ray_geomid,
+ normal=self._ray_normal,
+ )
+ self._raycast_graph = capture.graph
+
+ @property
+ def data(self) -> RayCastData:
+ self._perform_raycast()
+ assert self._distances is not None and self._normals_w is not None
+ assert self._hit_pos_w is not None
+ assert self._pos_w is not None and self._quat_w is not None
+ return RayCastData(
+ distances=self._distances,
+ normals_w=self._normals_w,
+ hit_pos_w=self._hit_pos_w,
+ pos_w=self._pos_w,
+ quat_w=self._quat_w,
+ )
+
+ @property
+ def num_rays(self) -> int:
+ return self._num_rays
+
+ def _perform_raycast(self) -> None:
+ assert self._data is not None and self._model is not None
+ assert self._local_offsets is not None and self._local_directions is not None
+
+ if self._frame_type == "body":
+ frame_pos = self._data.xpos[:, self._frame_body_id]
+ frame_mat = self._data.xmat[:, self._frame_body_id].view(-1, 3, 3)
+ elif self._frame_type == "site":
+ frame_pos = self._data.site_xpos[:, self._frame_site_id]
+ frame_mat = self._data.site_xmat[:, self._frame_site_id].view(-1, 3, 3)
+ else: # geom
+ frame_pos = self._data.geom_xpos[:, self._frame_geom_id]
+ frame_mat = self._data.geom_xmat[:, self._frame_geom_id].view(-1, 3, 3)
+
+ num_envs = frame_pos.shape[0]
+
+ # Apply ray alignment.
+ rot_mat = self._compute_alignment_rotation(frame_mat)
+
+ # Transform ray origins.
+ world_offsets = torch.einsum("bij,nj->bni", rot_mat, self._local_offsets)
+ world_origins = frame_pos.unsqueeze(1) + world_offsets
+
+ # Transform ray directions (per-ray).
+ world_rays = torch.einsum("bij,nj->bni", rot_mat, self._local_directions)
+
+ pnt_torch = wp.to_torch(self._ray_pnt).view(num_envs, self._num_rays, 3)
+ vec_torch = wp.to_torch(self._ray_vec).view(num_envs, self._num_rays, 3)
+ pnt_torch.copy_(world_origins)
+ vec_torch.copy_(world_rays)
+
+ if self._use_cuda_graph and self._raycast_graph is not None:
+ with wp.ScopedDevice(self._wp_device):
+ wp.capture_launch(self._raycast_graph)
+ else:
+ rays(
+ m=self._model.struct, # type: ignore[attr-defined]
+ d=self._data.struct, # type: ignore[attr-defined]
+ pnt=self._ray_pnt,
+ vec=self._ray_vec,
+ geomgroup=self._geomgroup,
+ flg_static=True,
+ bodyexclude=self._ray_bodyexclude,
+ dist=self._ray_dist,
+ geomid=self._ray_geomid,
+ normal=self._ray_normal,
+ )
+
+ self._distances = wp.to_torch(self._ray_dist)
+ self._normals_w = wp.to_torch(self._ray_normal).view(num_envs, self._num_rays, 3)
+ self._distances[self._distances > self.cfg.max_distance] = -1.0
+
+ # Compute hit positions: origin + direction * distance.
+ # Misses (distance = -1) keep the ray origin as their hit position
+ # placeholder (normals_w are zero there).
+ assert self._distances is not None
+ hit_mask = self._distances >= 0
+ hit_pos_w = world_origins.clone()
+ hit_pos_w[hit_mask] = world_origins[hit_mask] + world_rays[
+ hit_mask
+ ] * self._distances[hit_mask].unsqueeze(-1)
+ self._hit_pos_w = hit_pos_w
+
+ self._pos_w = frame_pos.clone()
+ self._quat_w = quat_from_matrix(frame_mat)
+
+ def _compute_alignment_rotation(self, frame_mat: torch.Tensor) -> torch.Tensor:
+ """Compute rotation matrix based on ray_alignment setting."""
+ if self.cfg.ray_alignment == "base":
+ # Full rotation.
+ return frame_mat
+ elif self.cfg.ray_alignment == "yaw":
+ # Extract yaw only, zero out pitch/roll.
+ return self._extract_yaw_rotation(frame_mat)
+ elif self.cfg.ray_alignment == "world":
+ # Identity rotation (world-aligned).
+ num_envs = frame_mat.shape[0]
+ return (
+ torch.eye(3, device=frame_mat.device, dtype=frame_mat.dtype)
+ .unsqueeze(0)
+ .expand(num_envs, -1, -1)
+ )
+ else:
+ raise ValueError(f"Unknown ray_alignment: {self.cfg.ray_alignment}")
+
+ def _extract_yaw_rotation(self, rot_mat: torch.Tensor) -> torch.Tensor:
+ """Extract yaw-only rotation matrix (rotation around Z axis).
+
+ Handles the singularity at ±90° pitch by falling back to the Y-axis
+ when the X-axis projection onto the XY plane is too small.
+ """
+ batch_size = rot_mat.shape[0]
+ device = rot_mat.device
+ dtype = rot_mat.dtype
+
+ # Project X-axis onto XY plane.
+ x_axis = rot_mat[:, :, 0] # First column [B, 3]
+ x_proj = x_axis.clone()
+ x_proj[:, 2] = 0 # Zero out Z component
+ x_norm = x_proj.norm(dim=1) # [B]
+
+ # Check for singularity (X-axis nearly vertical).
+ threshold = 0.1
+ singular = x_norm < threshold # [B]
+
+ # For singular cases, use Y-axis instead.
+ if singular.any():
+ y_axis = rot_mat[:, :, 1] # Second column [B, 3]
+ y_proj = y_axis.clone()
+ y_proj[:, 2] = 0
+ y_norm = y_proj.norm(dim=1).clamp(min=1e-6)
+ y_proj = y_proj / y_norm.unsqueeze(-1)
+ # Y-axis points left; rotate -90° around Z to get forward direction.
+ # [y_x, y_y] -> [y_y, -y_x]
+ x_from_y = torch.zeros_like(y_proj)
+ x_from_y[:, 0] = y_proj[:, 1]
+ x_from_y[:, 1] = -y_proj[:, 0]
+ x_proj[singular] = x_from_y[singular]
+ x_norm[singular] = 1.0 # Already normalized
+
+ # Normalize X projection.
+ x_norm = x_norm.clamp(min=1e-6)
+ x_proj = x_proj / x_norm.unsqueeze(-1)
+
+ # Build yaw-only rotation matrix.
+ yaw_mat = torch.zeros((batch_size, 3, 3), device=device, dtype=dtype)
+ yaw_mat[:, 0, 0] = x_proj[:, 0]
+ yaw_mat[:, 1, 0] = x_proj[:, 1]
+ yaw_mat[:, 0, 1] = -x_proj[:, 1]
+ yaw_mat[:, 1, 1] = x_proj[:, 0]
+ yaw_mat[:, 2, 2] = 1
+ return yaw_mat
+
+ def debug_vis(self, visualizer: DebugVisualizer) -> None:
+ if not self.cfg.debug_vis:
+ return
+ assert self._data is not None
+ assert self._local_offsets is not None
+ assert self._local_directions is not None
+
+ env_idx = visualizer.env_idx
+ data = self.data
+
+ if self._frame_type == "body":
+ frame_pos = self._data.xpos[env_idx, self._frame_body_id].cpu().numpy()
+ frame_mat_tensor = self._data.xmat[env_idx, self._frame_body_id].view(3, 3)
+ elif self._frame_type == "site":
+ frame_pos = self._data.site_xpos[env_idx, self._frame_site_id].cpu().numpy()
+ frame_mat_tensor = self._data.site_xmat[env_idx, self._frame_site_id].view(3, 3)
+ else: # geom
+ frame_pos = self._data.geom_xpos[env_idx, self._frame_geom_id].cpu().numpy()
+ frame_mat_tensor = self._data.geom_xmat[env_idx, self._frame_geom_id].view(3, 3)
+
+ # Apply ray alignment for visualization.
+ rot_mat_tensor = self._compute_alignment_rotation(frame_mat_tensor.unsqueeze(0))[0]
+ rot_mat = rot_mat_tensor.cpu().numpy()
+
+ local_offsets_np = self._local_offsets.cpu().numpy()
+ local_dirs_np = self._local_directions.cpu().numpy()
+ hit_positions_np = data.hit_pos_w[env_idx].cpu().numpy()
+ distances_np = data.distances[env_idx].cpu().numpy()
+ normals_np = data.normals_w[env_idx].cpu().numpy()
+
+ meansize = visualizer.meansize
+ ray_width = 0.1 * meansize
+ sphere_radius = self.cfg.viz.hit_sphere_radius * meansize
+ normal_length = self.cfg.viz.normal_length * meansize
+ normal_width = 0.1 * meansize
+
+ for i in range(self._num_rays):
+ origin = frame_pos + rot_mat @ local_offsets_np[i]
+ hit = distances_np[i] >= 0
+
+ if hit:
+ end = hit_positions_np[i]
+ color = self.cfg.viz.hit_color
+ else:
+ direction = rot_mat @ local_dirs_np[i]
+ end = origin + direction * min(0.5, self.cfg.max_distance * 0.05)
+ color = self.cfg.viz.miss_color
+
+ if self.cfg.viz.show_rays:
+ visualizer.add_arrow(
+ start=origin,
+ end=end,
+ color=color,
+ width=ray_width,
+ label=f"{self.cfg.name}_ray_{i}",
+ )
+
+ if hit:
+ visualizer.add_sphere(
+ center=end,
+ radius=sphere_radius,
+ color=self.cfg.viz.hit_sphere_color,
+ label=f"{self.cfg.name}_hit_{i}",
+ )
+ if self.cfg.viz.show_normals:
+ normal_end = end + normals_np[i] * normal_length
+ visualizer.add_arrow(
+ start=end,
+ end=normal_end,
+ color=self.cfg.viz.normal_color,
+ width=normal_width,
+ label=f"{self.cfg.name}_normal_{i}",
+ )
diff --git a/src/mjlab/sensor/sensor.py b/src/mjlab/sensor/sensor.py
index e20e1d9d2..279c72a13 100644
--- a/src/mjlab/sensor/sensor.py
+++ b/src/mjlab/sensor/sensor.py
@@ -12,6 +12,7 @@
if TYPE_CHECKING:
from mjlab.entity import Entity
+ from mjlab.viewer.debug_visualizer import DebugVisualizer
T = TypeVar("T")
@@ -109,3 +110,14 @@ def update(self, dt: float) -> None:
dt: Time step in seconds.
"""
del dt # Unused.
+
+ def debug_vis(self, visualizer: DebugVisualizer) -> None:
+ """Visualize sensor data for debugging.
+
+ Base implementation does nothing. Override in subclasses that support
+ debug visualization.
+
+ Args:
+ visualizer: The debug visualizer to draw to.
+ """
+ del visualizer # Unused.
diff --git a/src/mjlab/viewer/debug_visualizer.py b/src/mjlab/viewer/debug_visualizer.py
index 2e845ed85..c09dc3fcb 100644
--- a/src/mjlab/viewer/debug_visualizer.py
+++ b/src/mjlab/viewer/debug_visualizer.py
@@ -22,6 +22,12 @@ class DebugVisualizer(ABC):
env_idx: int
"""Index of the environment being visualized."""
+ @property
+ @abstractmethod
+ def meansize(self) -> float:
+ """Mean size of the model, used for scaling visualization elements."""
+ ...
+
@abstractmethod
def add_arrow(
self,
@@ -136,8 +142,13 @@ def clear(self) -> None:
class NullDebugVisualizer:
"""No-op visualizer when visualization is disabled."""
- def __init__(self, env_idx: int = 0):
+ def __init__(self, env_idx: int = 0, meansize: float = 0.1):
self.env_idx = env_idx
+ self._meansize = meansize
+
+ @property
+ def meansize(self) -> float:
+ return self._meansize
def add_arrow(self, start, end, color, width=0.015, label=None) -> None:
pass
diff --git a/src/mjlab/viewer/native/viewer.py b/src/mjlab/viewer/native/viewer.py
index 2e5ac75e0..feb96ca46 100644
--- a/src/mjlab/viewer/native/viewer.py
+++ b/src/mjlab/viewer/native/viewer.py
@@ -128,8 +128,9 @@ def sync_env_to_viewer(self) -> None:
with self._mj_lock:
sim_data = self.env.unwrapped.sim.data
- self.mjd.qpos[:] = sim_data.qpos[self.env_idx].cpu().numpy()
- self.mjd.qvel[:] = sim_data.qvel[self.env_idx].cpu().numpy()
+ if self.mjm.nq > 0:
+ self.mjd.qpos[:] = sim_data.qpos[self.env_idx].cpu().numpy()
+ self.mjd.qvel[:] = sim_data.qvel[self.env_idx].cpu().numpy()
if self.mjm.nmocap > 0:
self.mjd.mocap_pos[:] = sim_data.mocap_pos[self.env_idx].cpu().numpy()
self.mjd.mocap_quat[:] = sim_data.mocap_quat[self.env_idx].cpu().numpy()
@@ -195,14 +196,28 @@ def sync_env_to_viewer(self) -> None:
v.sync(state_only=True)
def sync_viewer_to_env(self) -> None:
- """Copy perturbation forces from viewer to env (when not paused)."""
- if not (self.enable_perturbations and not self._is_paused and self.mjd):
+ """Copy perturbation forces and mocap poses from viewer to env."""
+ if not self.enable_perturbations or self._is_paused or not self.mjd:
return
+ assert self.mjm is not None
with self._mj_lock:
xfrc = torch.as_tensor(
self.mjd.xfrc_applied, dtype=torch.float, device=self.env.device
)
- self.env.unwrapped.sim.data.xfrc_applied[:] = xfrc[None]
+ if self.mjm.nmocap > 0:
+ mocap_pos = torch.as_tensor(
+ self.mjd.mocap_pos, dtype=torch.float, device=self.env.device
+ )
+ mocap_quat = torch.as_tensor(
+ self.mjd.mocap_quat, dtype=torch.float, device=self.env.device
+ )
+ else:
+ mocap_pos = mocap_quat = None
+ sim_data = self.env.unwrapped.sim.data
+ sim_data.xfrc_applied[:] = xfrc[None]
+ if mocap_pos is not None and mocap_quat is not None:
+ sim_data.mocap_pos[:] = mocap_pos[None]
+ sim_data.mocap_quat[:] = mocap_quat[None]
def close(self) -> None:
"""Close viewer and cleanup."""
diff --git a/src/mjlab/viewer/native/visualizer.py b/src/mjlab/viewer/native/visualizer.py
index dcb68f25f..a93f17e18 100644
--- a/src/mjlab/viewer/native/visualizer.py
+++ b/src/mjlab/viewer/native/visualizer.py
@@ -29,12 +29,18 @@ def __init__(self, scn: mujoco.MjvScene, mj_model: mujoco.MjModel, env_idx: int)
self.mj_model = mj_model
self.env_idx = env_idx
self._initial_geom_count = scn.ngeom
+ self._meansize: float = mj_model.stat.meansize
self._vopt = mujoco.MjvOption()
self._vopt.flags[mujoco.mjtVisFlag.mjVIS_TRANSPARENT] = True
self._pert = mujoco.MjvPerturb()
self._viz_data = mujoco.MjData(mj_model)
+ @property
+ @override
+ def meansize(self) -> float:
+ return self._meansize
+
@override
def add_arrow(
self,
diff --git a/src/mjlab/viewer/offscreen_renderer.py b/src/mjlab/viewer/offscreen_renderer.py
index f98b9f866..a3c539ff2 100644
--- a/src/mjlab/viewer/offscreen_renderer.py
+++ b/src/mjlab/viewer/offscreen_renderer.py
@@ -54,16 +54,22 @@ def update(
self,
data: Any,
debug_vis_callback: Callable[[MujocoNativeDebugVisualizer], None] | None = None,
+ camera: str | None = None,
) -> None:
"""Update renderer with simulation data."""
if self._renderer is None:
raise ValueError("Renderer not initialized. Call 'initialize()' first.")
env_idx = self._cfg.env_idx
- self._data.qpos[:] = data.qpos[env_idx].cpu().numpy()
- self._data.qvel[:] = data.qvel[env_idx].cpu().numpy()
+ if self._model.nq > 0:
+ self._data.qpos[:] = data.qpos[env_idx].cpu().numpy()
+ self._data.qvel[:] = data.qvel[env_idx].cpu().numpy()
+ if self._model.nmocap > 0:
+ self._data.mocap_pos[:] = data.mocap_pos[env_idx].cpu().numpy()
+ self._data.mocap_quat[:] = data.mocap_quat[env_idx].cpu().numpy()
mujoco.mj_forward(self._model, self._data)
- self._renderer.update_scene(self._data, camera=self._cam)
+ cam = camera if camera is not None else self._cam
+ self._renderer.update_scene(self._data, camera=cam)
# Note: update_scene() resets the scene each frame, so no need to manually clear.
if debug_vis_callback is not None:
@@ -73,10 +79,14 @@ def update(
debug_vis_callback(visualizer)
# Add additional environments as geoms.
- nworld = data.qpos.shape[0]
+ nworld = data.nworld
for i in range(min(nworld, _MAX_ENVS)):
- self._data.qpos[:] = data.qpos[i].cpu().numpy()
- self._data.qvel[:] = data.qvel[i].cpu().numpy()
+ if self._model.nq > 0:
+ self._data.qpos[:] = data.qpos[i].cpu().numpy()
+ self._data.qvel[:] = data.qvel[i].cpu().numpy()
+ if self._model.nmocap > 0:
+ self._data.mocap_pos[:] = data.mocap_pos[i].cpu().numpy()
+ self._data.mocap_quat[:] = data.mocap_quat[i].cpu().numpy()
mujoco.mj_forward(self._model, self._data)
mujoco.mjv_addGeoms(
self._model,
diff --git a/src/mjlab/viewer/viser/scene.py b/src/mjlab/viewer/viser/scene.py
index 1453beb26..d29f6f913 100644
--- a/src/mjlab/viewer/viser/scene.py
+++ b/src/mjlab/viewer/viser/scene.py
@@ -880,6 +880,11 @@ def _update_contact_visualization(
# DebugVisualizer Protocol Implementation
# ============================================================================
+ @property
+ @override
+ def meansize(self) -> float:
+ return self.meansize_override or self.mj_model.stat.meansize
+
@override
def add_arrow(
self,
diff --git a/tests/test_raycast_sensor.py b/tests/test_raycast_sensor.py
new file mode 100644
index 000000000..5daaf1924
--- /dev/null
+++ b/tests/test_raycast_sensor.py
@@ -0,0 +1,1075 @@
+"""Tests for raycast_sensor.py."""
+
+from __future__ import annotations
+
+import math
+
+import mujoco
+import pytest
+import torch
+from conftest import get_test_device
+
+from mjlab.entity import EntityCfg
+from mjlab.scene import Scene, SceneCfg
+from mjlab.sensor import (
+ GridPatternCfg,
+ ObjRef,
+ PinholeCameraPatternCfg,
+ RayCastData,
+ RayCastSensorCfg,
+)
+from mjlab.sim.sim import Simulation, SimulationCfg
+
+
+@pytest.fixture(scope="module")
+def device():
+ """Test device fixture."""
+ return get_test_device()
+
+
+@pytest.fixture(scope="module")
+def robot_with_floor_xml():
+ """XML for a floating body above a ground plane."""
+ return """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+       <site name="base_site" pos="0 0 -0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+
+@pytest.fixture(scope="module")
+def scene_with_obstacles_xml():
+ """XML for a body above various obstacles."""
+ return """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <geom name="box_obstacle" type="box" size="0.5 0.5 0.25" pos="1 0 0.25"/>
+     <geom name="sphere_obstacle" type="sphere" size="0.3" pos="-1 0 0.3"/>
+     <body name="base" pos="0 0 3">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+
+def test_basic_raycast_hit_detection(robot_with_floor_xml, device):
+ """Verify rays detect the ground plane and return correct distances."""
+ entity_cfg = EntityCfg(
+ spec_fn=lambda: mujoco.MjSpec.from_string(robot_with_floor_xml)
+ )
+
+ raycast_cfg = RayCastSensorCfg(
+ name="terrain_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(
+ size=(0.5, 0.5), resolution=0.25, direction=(0.0, 0.0, -1.0)
+ ),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=2,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=2, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["terrain_scan"]
+ sim.step()
+ data = sensor.data
+
+ assert isinstance(data, RayCastData)
+ assert data.distances.shape[0] == 2 # num_envs
+ assert data.distances.shape[1] == sensor.num_rays
+ assert data.normals_w.shape == (2, sensor.num_rays, 3)
+
+ # All rays should hit the floor (misses would be reported as -1).
+ assert torch.all(data.distances >= 0)
+
+ # Distance should be approximately 2m (body at z=2, floor at z=0).
+ assert torch.allclose(data.distances, torch.full_like(data.distances, 2.0), atol=0.1)
+
+
+def test_raycast_normals_point_up(robot_with_floor_xml, device):
+ """Verify surface normals point upward when hitting a horizontal floor."""
+ entity_cfg = EntityCfg(
+ spec_fn=lambda: mujoco.MjSpec.from_string(robot_with_floor_xml)
+ )
+
+ raycast_cfg = RayCastSensorCfg(
+ name="terrain_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(
+ size=(0.3, 0.3), resolution=0.15, direction=(0.0, 0.0, -1.0)
+ ),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["terrain_scan"]
+ sim.step()
+ data = sensor.data
+
+ # Normals should point up (+Z) for a horizontal floor.
+ assert torch.allclose(
+ data.normals_w[:, :, 2], torch.ones_like(data.normals_w[:, :, 2])
+ )
+ assert torch.allclose(
+ data.normals_w[:, :, 0], torch.zeros_like(data.normals_w[:, :, 0])
+ )
+ assert torch.allclose(
+ data.normals_w[:, :, 1], torch.zeros_like(data.normals_w[:, :, 1])
+ )
+
+
+def test_raycast_miss_returns_negative_one(device):
+ """Verify rays that miss return distance of -1."""
+ # Scene with no floor - rays will miss.
+ no_floor_xml = """
+ <mujoco>
+   <worldbody>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(no_floor_xml))
+
+ raycast_cfg = RayCastSensorCfg(
+ name="terrain_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(
+ size=(0.3, 0.3), resolution=0.15, direction=(0.0, 0.0, -1.0)
+ ),
+ max_distance=10.0,
+ exclude_parent_body=True,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["terrain_scan"]
+ sim.step()
+ data = sensor.data
+
+ # All rays should miss (distance = -1).
+ assert torch.all(data.distances == -1)
+
+
+def test_raycast_exclude_parent_body(robot_with_floor_xml, device):
+ """Verify parent body is excluded from ray intersection when configured."""
+ entity_cfg = EntityCfg(
+ spec_fn=lambda: mujoco.MjSpec.from_string(robot_with_floor_xml)
+ )
+
+ # With exclude_parent_body=True, rays should pass through the robot body.
+ raycast_cfg = RayCastSensorCfg(
+ name="terrain_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.1, 0.1), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ exclude_parent_body=True,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["terrain_scan"]
+ sim.step()
+ data = sensor.data
+
+ # Rays should hit the floor, not the parent body geom.
+ # Floor is at z=0, body is at z=2, so distance should be ~2m.
+ assert torch.allclose(data.distances, torch.full_like(data.distances, 2.0), atol=0.1)
+
+
+def test_raycast_include_geom_groups(device):
+ """Verify include_geom_groups filters which geoms are hit."""
+ # Scene with floor in group 0 and a box in group 1.
+ groups_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1" group="0"/>
+     <geom name="platform" type="box" size="1 1 0.1" pos="0 0 1" group="1"/>
+     <body name="base" pos="0 0 3">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(groups_xml))
+
+ # Only include group 0 (floor) - should skip the platform in group 1.
+ raycast_cfg = RayCastSensorCfg(
+ name="group_filter_test",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.0, 0.0), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ include_geom_groups=(0,), # Only hit floor, not platform
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["group_filter_test"]
+ sim.step()
+ data = sensor.data
+
+ # Should hit floor at z=0, not platform at z=1.1. Distance from z=3 to z=0 is 3m.
+ assert torch.allclose(data.distances, torch.full_like(data.distances, 3.0), atol=0.1)
+
+ # Now test with group 1 included - should hit platform instead.
+ raycast_cfg_group1 = RayCastSensorCfg(
+ name="group1_test",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.0, 0.0), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ include_geom_groups=(1,), # Only hit platform
+ )
+
+ scene_cfg2 = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg_group1,),
+ )
+
+ scene2 = Scene(scene_cfg2, device)
+ model2 = scene2.compile()
+ sim2 = Simulation(num_envs=1, cfg=sim_cfg, model=model2, device=device)
+ scene2.initialize(sim2.mj_model, sim2.model, sim2.data)
+
+ sensor2 = scene2["group1_test"]
+ sim2.step()
+ data2 = sensor2.data
+
+ # Should hit platform at z=1.1. Distance from z=3 to z=1.1 is 1.9m.
+ assert torch.allclose(
+ data2.distances, torch.full_like(data2.distances, 1.9), atol=0.1
+ )
+
+
+def test_raycast_frame_attachment_geom(device):
+ """Verify rays can be attached to a geom frame."""
+ geom_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="sensor_mount" type="box" size="0.05 0.05 0.05" pos="0 0 -0.05"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(geom_xml))
+
+ raycast_cfg = RayCastSensorCfg(
+ name="geom_scan",
+ frame=ObjRef(type="geom", name="sensor_mount", entity="robot"),
+ pattern=GridPatternCfg(size=(0.2, 0.2), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["geom_scan"]
+ sim.step()
+ data = sensor.data
+
+ assert isinstance(data, RayCastData)
+ # Geom is at z=1.95 (body at z=2, geom offset -0.05), floor at z=0.
+ assert torch.allclose(data.distances, torch.full_like(data.distances, 1.95), atol=0.1)
+
+
+def test_raycast_frame_attachment_site(robot_with_floor_xml, device):
+ """Verify rays can be attached to a site frame."""
+ entity_cfg = EntityCfg(
+ spec_fn=lambda: mujoco.MjSpec.from_string(robot_with_floor_xml)
+ )
+
+ raycast_cfg = RayCastSensorCfg(
+ name="site_scan",
+ frame=ObjRef(type="site", name="base_site", entity="robot"),
+ pattern=GridPatternCfg(size=(0.2, 0.2), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["site_scan"]
+ sim.step()
+ data = sensor.data
+
+ assert isinstance(data, RayCastData)
+ # Site is at z=1.9 (body at z=2, site offset -0.1), floor at z=0.
+ assert torch.allclose(data.distances, torch.full_like(data.distances, 1.9), atol=0.1)
+
+
+def test_raycast_grid_pattern_num_rays(device):
+ """Verify grid pattern generates correct number of rays."""
+ simple_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(simple_xml))
+
+ # Grid: size=(1.0, 0.5), resolution=0.5.
+ # X: from -0.5 to 0.5 step 0.5 -> 3 points.
+ # Y: from -0.25 to 0.25 step 0.5 -> 2 points.
+ # Total: 3 * 2 = 6 rays.
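+ # (Assumed grid construction: inclusive endpoints, so size/resolution + 1 points per axis.)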
+ raycast_cfg = RayCastSensorCfg(
+ name="grid_test",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(1.0, 0.5), resolution=0.5),
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["grid_test"]
+ assert sensor.num_rays == 6
+
+
+def test_raycast_different_direction(device):
+ """Verify rays work with non-default direction."""
+ wall_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="wall" type="box" size="0.1 2 2" pos="2 0 1"/>
+     <body name="base" pos="0 0 1">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(wall_xml))
+
+ # Rays pointing in +X direction should hit wall at x=1.9.
+ raycast_cfg = RayCastSensorCfg(
+ name="forward_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.2, 0.2), resolution=0.1, direction=(1.0, 0.0, 0.0)),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["forward_scan"]
+ sim.step()
+ data = sensor.data
+
+ # Wall is at x=1.9 (wall center at x=2, size 0.1), body at x=0.
+ # Distance should be ~1.9m.
+ assert torch.allclose(data.distances, torch.full_like(data.distances, 1.9), atol=0.1)
+
+ # Normal should point in -X direction (toward the body).
+ assert torch.allclose(
+ data.normals_w[:, :, 0], -torch.ones_like(data.normals_w[:, :, 0]), atol=0.01
+ )
+
+
+def test_raycast_error_on_invalid_frame_type(device):
+ """Verify ValueError is raised for invalid frame type."""
+ with pytest.raises(ValueError, match="must be 'body', 'site', or 'geom'"):
+ simple_xml = """
+ <mujoco>
+   <worldbody>
+     <body name="base" pos="0 0 1">
+       <joint name="some_joint" type="hinge"/>
+       <geom name="base_geom" type="sphere" size="0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(simple_xml))
+
+ raycast_cfg = RayCastSensorCfg(
+ name="invalid",
+ frame=ObjRef(type="joint", name="some_joint", entity="robot"), # Invalid type
+ pattern=GridPatternCfg(size=(0.1, 0.1), resolution=0.1),
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+
+def test_raycast_hit_pos_w_correctness(robot_with_floor_xml, device):
+ """Verify hit_pos_w returns correct world-space hit positions."""
+ entity_cfg = EntityCfg(
+ spec_fn=lambda: mujoco.MjSpec.from_string(robot_with_floor_xml)
+ )
+
+ raycast_cfg = RayCastSensorCfg(
+ name="terrain_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.4, 0.4), resolution=0.2, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["terrain_scan"]
+ sim.step()
+ data = sensor.data
+
+ # All hit positions should be on the floor (z=0).
+ assert torch.allclose(
+ data.hit_pos_w[:, :, 2], torch.zeros_like(data.hit_pos_w[:, :, 2]), atol=0.01
+ )
+
+ # Hit positions X and Y should match the ray grid pattern offset from body origin.
+ # Body is at (0, 0, 2), grid is 0.4x0.4 with 0.2 resolution = 3x3 = 9 rays.
+ # X positions should be in range [-0.2, 0.2], Y positions in range [-0.2, 0.2].
+ assert torch.all(data.hit_pos_w[:, :, 0] >= -0.3)
+ assert torch.all(data.hit_pos_w[:, :, 0] <= 0.3)
+ assert torch.all(data.hit_pos_w[:, :, 1] >= -0.3)
+ assert torch.all(data.hit_pos_w[:, :, 1] <= 0.3)
+
+
+def test_raycast_max_distance_clamping(device):
+ """Verify hits beyond max_distance are reported as misses."""
+ far_floor_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 5">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(far_floor_xml))
+
+ # max_distance=3.0, but floor is 5m away. Should report miss.
+ raycast_cfg = RayCastSensorCfg(
+ name="short_range",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.2, 0.2), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=3.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["short_range"]
+ sim.step()
+ data = sensor.data
+
+ # All rays should miss (floor is beyond max_distance).
+ assert torch.all(data.distances == -1)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Likely bug on CPU MjWarp")
+def test_raycast_body_rotation_affects_rays(device):
+ """Verify rays rotate with the body frame."""
+ rotated_body_xml = """
+ <mujoco>
+   <option gravity="0 0 0"/>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(rotated_body_xml))
+
+ # Rays point in -Z in body frame. We'll tilt body 45 degrees around X.
+ # So rays should point diagonally down, hitting floor at a longer distance.
+ raycast_cfg = RayCastSensorCfg(
+ name="rotated_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.0, 0.0), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["rotated_scan"]
+
+ # First, verify baseline: unrotated body, rays hit floor at ~2m.
+ sim.step()
+ data_unrotated = sensor.data
+ assert torch.allclose(
+ data_unrotated.distances, torch.full_like(data_unrotated.distances, 2.0), atol=0.1
+ )
+
+ # Now tilt body 45 degrees around X axis.
+ # Ray direction -Z in body frame becomes diagonal in world frame.
+ # Distance to floor should be 2 / cos(45) = 2 * sqrt(2) ≈ 2.83m.
+ angle = math.pi / 4
+ quat = [math.cos(angle / 2), math.sin(angle / 2), 0, 0] # w, x, y, z
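+ # (MuJoCo stores quaternions scalar-first, [w, x, y, z]; this one rotates about +X.)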
+ sim.data.qpos[0, 3:7] = torch.tensor(quat, device=device)
+ sim.step()
+ data_rotated = sensor.data
+
+ expected_distance = 2.0 / math.cos(angle) # ~2.83m
+ assert torch.allclose(
+ data_rotated.distances,
+ torch.full_like(data_rotated.distances, expected_distance),
+ atol=0.15,
+ ), f"Expected ~{expected_distance:.2f}m, got {data_rotated.distances}"
+
+
+# ============================================================================
+# Pinhole Camera Pattern Tests
+# ============================================================================
+
+
+def test_pinhole_camera_pattern_num_rays(device):
+ """Verify pinhole pattern generates width * height rays."""
+ simple_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(simple_xml))
+
+ raycast_cfg = RayCastSensorCfg(
+ name="camera_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=PinholeCameraPatternCfg(width=16, height=12, fovy=74.0),
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["camera_scan"]
+ assert sensor.num_rays == 16 * 12
+
+
+def test_pinhole_camera_fov(robot_with_floor_xml, device):
+ """Verify pinhole pattern ray angles match FOV."""
+ entity_cfg = EntityCfg(
+ spec_fn=lambda: mujoco.MjSpec.from_string(robot_with_floor_xml)
+ )
+
+ # 90 degree vertical FOV.
+ raycast_cfg = RayCastSensorCfg(
+ name="camera_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=PinholeCameraPatternCfg(width=3, height=3, fovy=90.0),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["camera_scan"]
+ assert sensor.num_rays == 9
+
+
+def test_pinhole_from_intrinsic_matrix():
+ """Verify from_intrinsic_matrix creates correct config."""
+ # Intrinsic matrix with fx=500, fy=500, cx=320, cy=240.
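+ # Flattened row-major 3x3 K: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].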
+ intrinsic = [500.0, 0, 320, 0, 500.0, 240, 0, 0, 1]
+ width, height = 640, 480
+
+ cfg = PinholeCameraPatternCfg.from_intrinsic_matrix(intrinsic, width, height)
+
+ # Expected vertical FOV: 2 * atan(480 / (2 * 500)) = 2 * atan(0.48) ≈ 51.3 degrees.
+ fy = intrinsic[4]
+ expected_fov = 2 * math.atan(height / (2 * fy)) * 180 / math.pi
+ assert abs(cfg.fovy - expected_fov) < 0.1
+ assert cfg.width == width
+ assert cfg.height == height
+
+
+def test_pinhole_from_mujoco_camera(device):
+ """Verify pinhole pattern can be created from MuJoCo camera."""
+ # XML with a camera that has explicit resolution, sensorsize, and focal.
+ camera_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+       <camera name="depth_cam" resolution="64 48" sensorsize="0.0064 0.0048" focal="0.004 0.004"/>
+     </body>
+   </worldbody>
+ </mujoco>
+
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(camera_xml))
+
+ # Use from_mujoco_camera() to get params from MuJoCo camera.
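+ # ("robot/" is the entity namespace prefix applied when the entity spec is attached.)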
+ raycast_cfg = RayCastSensorCfg(
+ name="camera_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=PinholeCameraPatternCfg.from_mujoco_camera("robot/depth_cam"),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["camera_scan"]
+ # Should have 64 * 48 = 3072 rays.
+ assert sensor.num_rays == 64 * 48
+
+ # Verify rays work.
+ sim.step()
+ data = sensor.data
+ assert torch.all(data.distances >= 0) # Should hit floor
+
+
+def test_pinhole_from_mujoco_camera_fovy_mode(device):
+ """Verify pinhole pattern works with MuJoCo camera using fovy (not sensorsize)."""
+ # XML with a camera using fovy mode (no sensorsize/focal).
+ camera_xml = """
+ <mujoco>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+       <camera name="fovy_cam" fovy="60" resolution="32 24"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(camera_xml))
+
+ raycast_cfg = RayCastSensorCfg(
+ name="camera_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=PinholeCameraPatternCfg.from_mujoco_camera("robot/fovy_cam"),
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["camera_scan"]
+ # Should have 32 * 24 = 768 rays.
+ assert sensor.num_rays == 32 * 24
+
+ # Verify rays work.
+ sim.step()
+ data = sensor.data
+ assert torch.all(data.distances >= 0) # Should hit floor
+
+
+# ============================================================================
+# Ray Alignment Tests
+# ============================================================================
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Likely bug on CPU MjWarp")
+def test_ray_alignment_yaw(device):
+ """Verify yaw alignment ignores pitch/roll."""
+ rotated_body_xml = """
+ <mujoco>
+   <option gravity="0 0 0"/>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(rotated_body_xml))
+
+ # With yaw alignment, tilting the body should NOT affect ray direction.
+ raycast_cfg = RayCastSensorCfg(
+ name="yaw_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.0, 0.0), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ ray_alignment="yaw",
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["yaw_scan"]
+
+ # Baseline: unrotated.
+ sim.step()
+ data_unrotated = sensor.data
+ baseline_dist = data_unrotated.distances.clone()
+
+ # Tilt body 45 degrees around X axis.
+ angle = math.pi / 4
+ quat = [math.cos(angle / 2), math.sin(angle / 2), 0, 0] # w, x, y, z
+ sim.data.qpos[0, 3:7] = torch.tensor(quat, device=device)
+ sim.step()
+ data_tilted = sensor.data
+
+ # With yaw alignment, distance should remain ~2m (not change due to tilt).
+ assert torch.allclose(data_tilted.distances, baseline_dist, atol=0.1), (
+ f"Expected ~2m, got {data_tilted.distances}"
+ )
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Likely bug on CPU MjWarp")
+def test_ray_alignment_world(device):
+ """Verify world alignment keeps rays fixed."""
+ rotated_body_xml = """
+ <mujoco>
+   <option gravity="0 0 0"/>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(rotated_body_xml))
+
+ # With world alignment, rotating body should NOT affect ray direction.
+ raycast_cfg = RayCastSensorCfg(
+ name="world_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.0, 0.0), resolution=0.1, direction=(0.0, 0.0, -1.0)),
+ ray_alignment="world",
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["world_scan"]
+
+ # Baseline: unrotated.
+ sim.step()
+ data_unrotated = sensor.data
+ baseline_dist = data_unrotated.distances.clone()
+
+ # Rotate body 90 degrees around Z (yaw), then tilt 45 degrees around X.
+ # With world alignment, distance should still be ~2m.
+ yaw_angle = math.pi / 2
+ pitch_angle = math.pi / 4
+ # Compose quaternions: yaw then pitch.
+ cy, sy = math.cos(yaw_angle / 2), math.sin(yaw_angle / 2)
+ cp, sp = math.cos(pitch_angle / 2), math.sin(pitch_angle / 2)
+ # q_yaw = [cy, 0, 0, sy], q_pitch = [cp, sp, 0, 0]
+ # q = q_pitch * q_yaw
+ qw = cp * cy
+ qx = sp * cy
+ qy = sp * sy
+ qz = cp * sy
+ sim.data.qpos[0, 3:7] = torch.tensor([qw, qx, qy, qz], device=device)
+ sim.step()
+ data_rotated = sensor.data
+
+ # With world alignment, distance should remain ~2m.
+ assert torch.allclose(data_rotated.distances, baseline_dist, atol=0.1), (
+ f"Expected ~2m, got {data_rotated.distances}"
+ )
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="Likely bug on CPU MjWarp")
+def test_ray_alignment_yaw_singularity(device):
+ """Test yaw alignment handles 90° pitch singularity correctly.
+
+ With yaw alignment, rays should maintain their pattern regardless of body pitch.
+ At 90° pitch, the body's X-axis is vertical, making yaw extraction ambiguous.
+ The implementation uses Y-axis fallback to produce a valid yaw rotation.
+
+ This test verifies that distances at 90° pitch match the baseline (0° pitch).
+ """
+ xml = """
+ <mujoco>
+   <option gravity="0 0 0"/>
+   <worldbody>
+     <geom name="floor" type="plane" size="10 10 0.1"/>
+     <body name="base" pos="0 0 2">
+       <freejoint/>
+       <geom name="base_geom" type="box" size="0.1 0.1 0.1"/>
+     </body>
+   </worldbody>
+ </mujoco>
+ """
+
+ entity_cfg = EntityCfg(spec_fn=lambda: mujoco.MjSpec.from_string(xml))
+
+ # Use grid pattern with diagonal direction - has X component to expose singularity.
+ # Direction [1, 0, -1] points forward and down at 45°.
+ raycast_cfg = RayCastSensorCfg(
+ name="yaw_scan",
+ frame=ObjRef(type="body", name="base", entity="robot"),
+ pattern=GridPatternCfg(size=(0.0, 0.0), resolution=0.1, direction=(1.0, 0.0, -1.0)),
+ ray_alignment="yaw",
+ max_distance=10.0,
+ )
+
+ scene_cfg = SceneCfg(
+ num_envs=1,
+ env_spacing=5.0,
+ entities={"robot": entity_cfg},
+ sensors=(raycast_cfg,),
+ )
+
+ scene = Scene(scene_cfg, device)
+ model = scene.compile()
+ sim_cfg = SimulationCfg(njmax=20)
+ sim = Simulation(num_envs=1, cfg=sim_cfg, model=model, device=device)
+ scene.initialize(sim.mj_model, sim.model, sim.data)
+
+ sensor = scene["yaw_scan"]
+
+ # Baseline: no rotation. Ray at 45° from height 2m hits floor at x=2, z=0.
+ sim.step()
+ baseline_hit_pos = sensor.data.hit_pos_w.clone()
+ # Ray goes diagonally +X and -Z, starting from (0,0,2), should hit floor at (2, 0, 0).
+ assert torch.allclose(
+ baseline_hit_pos[0, 0, 0], torch.tensor(2.0, device=device), atol=0.1
+ ), f"Baseline X hit should be ~2, got {baseline_hit_pos[0, 0, 0]}"
+ assert torch.allclose(
+ baseline_hit_pos[0, 0, 2], torch.tensor(0.0, device=device), atol=0.1
+ ), f"Baseline Z hit should be ~0, got {baseline_hit_pos[0, 0, 2]}"
+
+ # Pitch 90° around Y-axis. Body X-axis now points straight down (singularity).
+ angle = math.pi / 2
+ quat = [math.cos(angle / 2), 0, math.sin(angle / 2), 0] # w, x, y, z
+ sim.data.qpos[0, 3:7] = torch.tensor(quat, device=device)
+ sim.step()
+
+ singularity_hit_pos = sensor.data.hit_pos_w
+
+ # With yaw alignment, hit position should match baseline regardless of pitch.
+ # The ray should still go diagonally and hit at (2, 0, 0).
+ assert torch.allclose(singularity_hit_pos, baseline_hit_pos, atol=0.1), (
+ f"Yaw alignment failed at 90° pitch singularity.\n"
+ f"Baseline hit_pos: {baseline_hit_pos}\n"
+ f"Singularity hit_pos: {singularity_hit_pos}"
+ )
diff --git a/uv.lock b/uv.lock
index 68389456b..b3c365604 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1289,7 +1289,7 @@ requires-dist = [
{ name = "autodocsumm", marker = "extra == 'docs'" },
{ name = "moviepy" },
{ name = "mujoco", specifier = ">=3.4.0", index = "https://py.mujoco.org/" },
- { name = "mujoco-warp", git = "https://github.com/google-deepmind/mujoco_warp?rev=55ab24595ea4939ff583f1910e1615361d49d9ad" },
+ { name = "mujoco-warp", git = "https://github.com/google-deepmind/mujoco_warp?rev=1c6b31692eaa5d3134093958d839165245d196a6" },
{ name = "myst-parser", marker = "extra == 'docs'", specifier = ">=4.0.1" },
{ name = "onnxscript", specifier = ">=0.5.4" },
{ name = "prettytable" },
@@ -1500,7 +1500,7 @@ wheels = [
[[package]]
name = "mujoco-warp"
version = "0.0.1"
-source = { git = "https://github.com/google-deepmind/mujoco_warp?rev=55ab24595ea4939ff583f1910e1615361d49d9ad#55ab24595ea4939ff583f1910e1615361d49d9ad" }
+source = { git = "https://github.com/google-deepmind/mujoco_warp?rev=1c6b31692eaa5d3134093958d839165245d196a6#1c6b31692eaa5d3134093958d839165245d196a6" }
dependencies = [
{ name = "absl-py" },
{ name = "etils", extra = ["epath"] },