Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion docs/source/code-documentation/lo.rst
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,3 @@ The L0 code to decommutate the CCSDS packet data can be found below.
:recursive:

l0.utils
l0.data_classes
98 changes: 0 additions & 98 deletions imap_processing/lo/l0/data_classes/star_sensor.py

This file was deleted.

48 changes: 48 additions & 0 deletions imap_processing/lo/l0/lo_star_sensor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
"""Processing function for Lo star sensor data."""

import logging

import numpy as np
import xarray as xr

from imap_processing.lo.l0.utils.bit_decompression import (
DECOMPRESSION_TABLES,
Decompress,
)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


def process_star_sensor(ds: xr.Dataset) -> xr.Dataset:
    """
    Process Lo star sensor data.

    Parameters
    ----------
    ds : xr.Dataset
        The packet dataset containing Lo star sensor data.

    Returns
    -------
    xr.Dataset
        Processed dataset with a decompressed data field.
    """
    # Concatenate every packet's compressed payload into one flat byte buffer
    # so a single frombuffer call suffices. This relies on every
    # "data_compressed" entry holding exactly 720 one-byte samples
    # (confirmed with the instrument team: the packet always carries 720).
    flat = b"".join(ds["data_compressed"].values)
    raw = np.frombuffer(flat, dtype=np.uint8).reshape(-1, 720)

    # Expand the 8-bit compressed codes to 12-bit counts via the lookup
    # table; column 2 of the table holds the mean value for each code.
    table = DECOMPRESSION_TABLES[Decompress.DECOMPRESS8TO12].astype(np.uint16)
    decompressed = table[raw, 2]

    # The packet dataset already contains a variable named "count", so name
    # the per-sample dimension "data_index" to avoid a collision.
    ds["data_index"] = xr.DataArray(np.arange(720), dims="data_index")
    ds["data"] = xr.DataArray(decompressed, dims=("epoch", "data_index"))
    # The compressed source field is no longer needed once decompressed.
    ds = ds.drop_vars("data_compressed")
    return ds
57 changes: 0 additions & 57 deletions imap_processing/lo/l0/utils/lo_base.py

This file was deleted.

64 changes: 32 additions & 32 deletions imap_processing/lo/l1a/lo_l1a.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
parse_events,
parse_histogram,
)
from imap_processing.lo.l0.lo_star_sensor import process_star_sensor
from imap_processing.utils import convert_to_binary_string, packet_file_to_datasets

logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -50,60 +51,59 @@ def lo_l1a(dependency: Path) -> list[xr.Dataset]:
attr_mgr.add_instrument_global_attrs(instrument="lo")
attr_mgr.add_instrument_variable_attrs(instrument="lo", level="l1a")

datasets_to_return = []

if LoAPID.ILO_SPIN in datasets_by_apid:
logger.info(
f"\nProcessing {LoAPID(LoAPID.ILO_SPIN).name} "
f"packet (APID: {LoAPID.ILO_SPIN.value})"
)
logical_source = "imap_lo_l1a_spin"
datasets_by_apid[LoAPID.ILO_SPIN] = organize_spin_data(
datasets_by_apid[LoAPID.ILO_SPIN], attr_mgr
)

datasets_by_apid[LoAPID.ILO_SPIN] = add_dataset_attrs(
datasets_by_apid[LoAPID.ILO_SPIN], attr_mgr, logical_source
)
ds = datasets_by_apid[LoAPID.ILO_SPIN]
ds = organize_spin_data(ds, attr_mgr)
ds = add_dataset_attrs(ds, attr_mgr, logical_source)
datasets_to_return.append(ds)
if LoAPID.ILO_SCI_CNT in datasets_by_apid:
logger.info(
f"\nProcessing {LoAPID(LoAPID.ILO_SCI_CNT).name} "
f"packet (APID: {LoAPID.ILO_SCI_CNT.value})"
)
logical_source = "imap_lo_l1a_histogram"
datasets_by_apid[LoAPID.ILO_SCI_CNT] = parse_histogram(
datasets_by_apid[LoAPID.ILO_SCI_CNT], attr_mgr
)
datasets_by_apid[LoAPID.ILO_SCI_CNT] = add_dataset_attrs(
datasets_by_apid[LoAPID.ILO_SCI_CNT], attr_mgr, logical_source
)
ds = datasets_by_apid[LoAPID.ILO_SCI_CNT]
ds = parse_histogram(ds, attr_mgr)
ds = add_dataset_attrs(ds, attr_mgr, logical_source)
datasets_to_return.append(ds)
if LoAPID.ILO_SCI_DE in datasets_by_apid:
logger.info(
f"\nProcessing {LoAPID(LoAPID.ILO_SCI_DE).name} "
f"packet (APID: {LoAPID.ILO_SCI_DE.value})"
)
logical_source = "imap_lo_l1a_de"
datasets_by_apid[LoAPID.ILO_SCI_DE]["data"] = xr.DataArray(
[
convert_to_binary_string(data)
for data in datasets_by_apid[LoAPID.ILO_SCI_DE]["data"].values
],
dims=datasets_by_apid[LoAPID.ILO_SCI_DE]["data"].dims,
attrs=datasets_by_apid[LoAPID.ILO_SCI_DE]["data"].attrs,
)

datasets_by_apid[LoAPID.ILO_SCI_DE] = combine_segmented_packets(
datasets_by_apid[LoAPID.ILO_SCI_DE]
ds = datasets_by_apid[LoAPID.ILO_SCI_DE]
# Process the "data" array into a string
ds["data"] = xr.DataArray(
[convert_to_binary_string(data) for data in ds["data"].values],
dims=ds["data"].dims,
attrs=ds["data"].attrs,
)

datasets_by_apid[LoAPID.ILO_SCI_DE] = parse_events(
datasets_by_apid[LoAPID.ILO_SCI_DE], attr_mgr
)
datasets_by_apid[LoAPID.ILO_SCI_DE] = add_dataset_attrs(
datasets_by_apid[LoAPID.ILO_SCI_DE], attr_mgr, logical_source
ds = combine_segmented_packets(ds)
ds = parse_events(ds, attr_mgr)
ds = add_dataset_attrs(ds, attr_mgr, logical_source)
datasets_to_return.append(ds)
if LoAPID.ILO_STAR in datasets_by_apid:
logger.info(
f"\nProcessing {LoAPID(LoAPID.ILO_STAR).name} "
f"packet (APID: {LoAPID.ILO_STAR.value})"
)
logical_source = "imap_lo_l1a_star"
ds = datasets_by_apid[LoAPID.ILO_STAR]
ds = process_star_sensor(ds)
ds = add_dataset_attrs(ds, attr_mgr, logical_source)
datasets_to_return.append(ds)

good_apids = [LoAPID.ILO_SPIN, LoAPID.ILO_SCI_CNT, LoAPID.ILO_SCI_DE]
logger.info(f"\nReturning datasets: {[LoAPID(apid) for apid in good_apids]}")
return [datasets_by_apid[good_apid] for good_apid in good_apids]
logger.info(f"Returning [{len(datasets_to_return)}] datasets")
return datasets_to_return


def add_dataset_attrs(
Expand Down
1 change: 1 addition & 0 deletions imap_processing/tests/lo/test_lo_l1a.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ def test_lo_l1a():
"imap_lo_l1a_spin",
"imap_lo_l1a_histogram",
"imap_lo_l1a_de",
"imap_lo_l1a_star",
]
output_dataset = lo_l1a(dependency)

Expand Down
64 changes: 41 additions & 23 deletions imap_processing/tests/lo/test_star_sensor.py
Original file line number Diff line number Diff line change
@@ -1,35 +1,53 @@
from collections import namedtuple

import numpy as np
import pytest

from imap_processing.ccsds.ccsds_data import CcsdsData
from imap_processing.lo.l0.data_classes.star_sensor import StarSensor
from imap_processing import imap_module_directory
from imap_processing.lo.l0.lo_apid import LoAPID
from imap_processing.lo.l0.lo_star_sensor import process_star_sensor
from imap_processing.utils import packet_file_to_datasets


@pytest.fixture
def star_sensor_ds():
    """Decode the sample packet file and return the ILO_STAR dataset."""
    definition = imap_module_directory / "lo/packet_definitions/lo_xtce.xml"
    pkts = (
        imap_module_directory / "tests/lo/test_pkts/imap_lo_l0_raw_20240803_v002.pkts"
    )
    decoded = packet_file_to_datasets(
        packet_file=pkts.resolve(),
        xtce_packet_definition=definition.resolve(),
        use_derived_value=False,
    )
    return decoded[LoAPID.ILO_STAR]


def test_star_sensor(star_sensor_ds):
    """Spot-check decompressed star sensor output against validation data."""
    validation_file = (
        imap_module_directory
        / "tests/lo/validation_data"
        / "Instrument_FM1_T104_R129_20240803_ILO_STAR_EU_trimmed.csv"
    )
    # np.loadtxt is simpler than pandas read_csv here because of the very
    # wide file; slice the columns of interest out ourselves.
    validation_arr = np.loadtxt(validation_file, delimiter=",", skiprows=1, dtype=int)
    expected_shcoarse = validation_arr[:, 0]
    expected_count = validation_arr[:, 1]
    # Columns 2:722 hold the compressed data, which is not checked here.
    # validation_data_compressed = validation_arr[:, 2:722]
    expected_decompressed = validation_arr[:, 722:-1]
    expected_checksum = validation_arr[:, -1]

    ds = process_star_sensor(star_sensor_ds)

    # 45 epochs, each with 720 decompressed values stored as uint16.
    assert ds["data"].shape == (45, 720)
    assert ds["data"].dtype == np.uint16

    # Only spot check a few rows against the validation file:
    # the first three and the final one.
    subset = ds.isel(epoch=[0, 1, 2, -1])
    assert len(subset["epoch"]) == 4
    np.testing.assert_array_equal(subset["shcoarse"], expected_shcoarse)
    np.testing.assert_array_equal(subset["count"], expected_count)
    np.testing.assert_array_equal(subset["data"], expected_decompressed)
    np.testing.assert_array_equal(subset["chksum"], expected_checksum)
Loading