Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 75 additions & 0 deletions imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,81 @@ default_float64_attrs: &default_float64
VALIDMAX: 1.7976931348623157e+308
dtype: float64


# Final I-ALiRT data product fields
CODICE_LO_IAL_DATA_FIELDS = [
"c_over_o_abundance",
"mg_over_o_abundance",
"fe_over_o_abundance",
"c_plus_6_over_c_plus_5_ratio",
"o_plus_7_over_o_plus_6_ratio",
"fe_low_over_fe_high_ratio",
]
CODICE_HI_IAL_DATA_FIELDS = ["h"]

codicehi_h:
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Keep in mind that each of these variables will be a single column in the database. So there will not be dimensions here. For SWE, for example, I have a separate variable for each energy level. Does this need to be broken down further?

Copy link
Copy Markdown
Collaborator Author

@bourque bourque Jun 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good question/good point -- For this particular data variable, there are 4 dimensions: epoch, energy (there are 15 levels), ssd_index (there are 4 of these), and spin_sector (there are 4 of these). So the dimensions of the data are (<# epochs>, 15, 4, 4).

I am not sure how to break this down. Would this mean I need to make (15*4*4=) 240 separate variables?

<<: *default_float32
CATDESC: H intensities in 15 energy ranges and binned into 4 azimuths and 4 spin angle bins
FIELDNAM: H intensities
LABLAXIS: H
UNITS: "# / cm2-sr-s- MeV"
VALIDMIN: 0
VALIDMAX: 100000000.0

codicelo_c_over_o_abundance:
<<: *default_float32
CATDESC: C/O abundance ratio
FIELDNAM: C/O abundance ratio
LABLAXIS: C/O
UNITS: " "
VALIDMIN: 0
VALIDMAX: 100000000.0

codicelo_mg_over_o_abundance:
<<: *default_float32
CATDESC: Mg/O abundance ratio
FIELDNAM: Mg/O abundance ratio
LABLAXIS: Mg/O
UNITS: " "
VALIDMIN: 0
VALIDMAX: 100000000.0

codicelo_fe_over_o_abundance:
<<: *default_float32
CATDESC: Fe/O abundance ratio
FIELDNAM: Fe/O abundance ratio
LABLAXIS: Fe/O
UNITS: " "
VALIDMIN: 0
VALIDMAX: 100000000.0

codicelo_c_plus_6_over_c_plus_5_ratio:
<<: *default_float32
CATDESC: C+6/C+5 charge state ratio
FIELDNAM: C+6/C+5 charge state ratio
LABLAXIS: C+6/C+5
UNITS: " "
VALIDMIN: 0
VALIDMAX: 100000000.0

codicelo_o_plus_7_over_o_plus_6_ratio:
<<: *default_float32
CATDESC: O+7/O+6 charge state ratio
FIELDNAM: O+7/O+6 charge state ratio
LABLAXIS: O+7/O+6
UNITS: " "
VALIDMIN: 0
VALIDMAX: 100000000.0

codicelo_fe_low_over_fe_high_ratio:
<<: *default_float32
CATDESC: Fe low/Fe high charge state ratio
FIELDNAM: Fe low/Fe high charge state ratio
LABLAXIS: Fe low/Fe high
UNITS: " "
VALIDMIN: 0
VALIDMAX: 100000000.0

hit_e_a_side_low_en:
<<: *default_uint32
CATDESC: Low energy (~300 keV) electrons (A-side)
Expand Down
149 changes: 87 additions & 62 deletions imap_processing/codice/codice_l1a.py
Original file line number Diff line number Diff line change
Expand Up @@ -674,7 +674,9 @@ def set_data_product_config(self, apid: int, dataset: xr.Dataset) -> None:
self.cdf_attrs.add_instrument_variable_attrs("codice", "l1a")


def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[bytearray]:
def group_ialirt_data(
packets: xr.Dataset, data_field_range: range, prefix: str
) -> list[bytearray]:
"""
Group together the individual I-ALiRT data fields.

Expand All @@ -684,6 +686,8 @@ def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[byte
The dataset containing the I-ALiRT data packets.
data_field_range : range
The range of the individual data fields (15 for lo, 5 for hi).
prefix : str
The prefix used to index the data (i.e. ``cod_lo`` or ``cod_hi``).

Returns
-------
Expand All @@ -693,14 +697,28 @@ def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[byte
current_data_stream = bytearray()
grouped_data = []

# Workaround to get this function working for both I-ALiRT spacecraft
# data and CoDICE-specific I-ALiRT test data from Joey
# TODO: Once CoDICE I-ALiRT processing is more established, we can probably
# do away with processing the test data from Joey and just use the
# I-ALiRT data that is constructed closer to what we expect in-flight.
if hasattr(packets, "acquisition_time"):
time_key = "acquisition_time"
counter_key = "counter"
data_key = "data"
else:
time_key = f"{prefix}_acq"
counter_key = f"{prefix}_counter"
data_key = f"{prefix}_data"

# When a counter value of 255 is encountered, this signifies the
# end of the data stream
for packet_num in range(0, len(packets.acquisition_time.data)):
counter = packets.counter.data[packet_num]
for packet_num in range(0, len(packets[time_key].data)):
counter = packets[counter_key].data[packet_num]
if counter != 255:
for field in data_field_range:
current_data_stream.extend(
bytearray([packets[f"data_{field:02}"].data[packet_num]])
bytearray([packets[f"{data_key}_{field:02}"].data[packet_num]])
)
else:
# At this point, if there are data, the data stream is ready
Expand Down Expand Up @@ -1003,72 +1021,79 @@ def create_ialirt_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
# See sections 10.4.1 and 10.4.2 in the algorithm document
if apid == CODICEAPID.COD_LO_IAL:
data_field_range = range(0, 15)
prefix = "cod_lo"
elif apid == CODICEAPID.COD_HI_IAL:
data_field_range = range(0, 5)
prefix = "cod_hi"

# Group together packets of I-ALiRT data to form complete data sets
grouped_data = group_ialirt_data(packets, data_field_range)

# Process each group to get the science data and corresponding metadata
science_values, metadata_values = process_ialirt_data_streams(grouped_data)

# How data are processed is different for lo-ialirt and hi-ialirt
if apid == CODICEAPID.COD_HI_IAL:
# Set some necessary values and process as a binned dataset similar to
# a hi-omni data product
metadata_for_processing = [
"table_id",
"plan_id",
"plan_step",
"view_id",
"spin_period",
"suspect",
]
for var in metadata_for_processing:
packets[var] = metadata_values[var.upper()]
dataset = create_binned_dataset(apid, packets, science_values)

elif apid == CODICEAPID.COD_LO_IAL:
# Create a nominal instance of the pipeline and process similar to a
# lo-sw-species data product
pipeline = CoDICEL1aPipeline(
metadata_values["TABLE_ID"][0],
metadata_values["PLAN_ID"][0],
metadata_values["PLAN_STEP"][0],
metadata_values["VIEW_ID"][0],
)
pipeline.set_data_product_config(apid, packets)
pipeline.decompress_data(science_values)
pipeline.reshape_data()

# The calculate_epoch_values method needs acq_start_seconds and
# acq_start_subseconds attributes on the dataset
pipeline.dataset["acq_start_seconds"] = (
"_",
metadata_values["ACQ_START_SECONDS"],
)
pipeline.dataset["acq_start_subseconds"] = (
"_",
metadata_values["ACQ_START_SUBSECONDS"],
)
grouped_data = group_ialirt_data(packets, data_field_range, prefix)

if grouped_data:
# Process each group to get the science data and corresponding metadata
science_values, metadata_values = process_ialirt_data_streams(grouped_data)

# How data are processed is different for lo-ialirt and hi-ialirt
if apid == CODICEAPID.COD_HI_IAL:
# Set some necessary values and process as a binned dataset similar to
# a hi-omni data product
metadata_for_processing = [
"table_id",
"plan_id",
"plan_step",
"view_id",
"spin_period",
"suspect",
]
for var in metadata_for_processing:
packets[var] = metadata_values[var.upper()]
dataset = create_binned_dataset(apid, packets, science_values)

elif apid == CODICEAPID.COD_LO_IAL:
# Create a nominal instance of the pipeline and process similar to a
# lo-sw-species data product
pipeline = CoDICEL1aPipeline(
metadata_values["TABLE_ID"][0],
metadata_values["PLAN_ID"][0],
metadata_values["PLAN_STEP"][0],
metadata_values["VIEW_ID"][0],
)
pipeline.set_data_product_config(apid, packets)
pipeline.decompress_data(science_values)
pipeline.reshape_data()
Comment on lines -1010 to +1063
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nothing much is changing here, I just put this all in an if/else statement to check if there are grouped datasets, and if there are none, throw a warning.


pipeline.define_coordinates()
# The calculate_epoch_values method needs acq_start_seconds and
# acq_start_subseconds attributes on the dataset
pipeline.dataset["acq_start_seconds"] = (
"_",
metadata_values["ACQ_START_SECONDS"],
)
pipeline.dataset["acq_start_subseconds"] = (
"_",
metadata_values["ACQ_START_SUBSECONDS"],
)

# The dataset also needs the metadata that will be carried through
# to the final data product
for field in [
"spin_period",
"suspect",
"st_bias_gain_mode",
"sw_bias_gain_mode",
"rgfo_half_spin",
"nso_half_spin",
]:
pipeline.dataset[field] = ("_", metadata_values[field.upper()])
pipeline.define_coordinates()

dataset = pipeline.define_data_variables()
# The dataset also needs the metadata that will be carried through
# to the final data product
for field in [
"spin_period",
"suspect",
"st_bias_gain_mode",
"sw_bias_gain_mode",
"rgfo_half_spin",
"nso_half_spin",
]:
pipeline.dataset[field] = ("_", metadata_values[field.upper()])

return dataset
dataset = pipeline.define_data_variables()

return dataset

else:
logger.warning("No I-ALiRT data found")
return None
Copy link

Copilot AI Jun 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Returning None when no I-ALiRT data is found may lead to downstream NoneType errors; consider returning an empty xarray.Dataset or raising a specific exception.

Suggested change
return None
return xr.Dataset()

Copilot uses AI. Check for mistakes.


def get_de_metadata(packets: xr.Dataset, segment: int) -> bytes:
Expand Down
11 changes: 11 additions & 0 deletions imap_processing/codice/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,17 @@
f"P{n}_{field}" for n in range(8) for field in LO_PHA_CDF_FIELDS
]

# Final I-ALiRT data product fields
CODICE_LO_IAL_DATA_FIELDS = [
"c_over_o_abundance",
"mg_over_o_abundance",
"fe_over_o_abundance",
"c_plus_6_over_c_plus_5_ratio",
"o_plus_7_over_o_plus_6_ratio",
"fe_low_over_fe_high_ratio",
]
CODICE_HI_IAL_DATA_FIELDS = ["h"]

# lo- and hi-counters-aggregated data product variables are dynamically
# determined based on the number of active counters
# TODO: Try to convince Joey to move to lower case variable names with
Expand Down
53 changes: 48 additions & 5 deletions imap_processing/ialirt/l0/process_codice.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,20 @@
"""Functions to support I-ALiRT CoDICE processing."""

import logging
from typing import Any

import xarray as xr

from imap_processing.codice.codice_l1a import create_ialirt_dataset
from imap_processing.codice import constants
from imap_processing.ialirt.utils.time import calculate_time
from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc

logger = logging.getLogger(__name__)


def process_codice(dataset: xr.Dataset) -> list[dict]:
def process_codice(
dataset: xr.Dataset,
) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
"""
Create final data products.

Expand All @@ -31,8 +36,12 @@ def process_codice(dataset: xr.Dataset) -> list[dict]:
- Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
- Calculate the public data products
"""
apid = dataset.pkt_apid.data[0]
codice_data = create_ialirt_dataset(apid, dataset)
# For I-ALiRT SIT, the test data being used has all zeros and thus no
# groups can be found, thus there is no data to process
# TODO: Once I-ALiRT test data is acquired that actually has data in it,
# this can be turned back on
# codicelo_data = create_ialirt_dataset(CODICEAPID.COD_LO_IAL, dataset)
# codicehi_data = create_ialirt_dataset(CODICEAPID.COD_HI_IAL, dataset)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right after launch we will immediately begin to receive I-ALiRT packets that are empty. So we might have this scenario in real-life. Something to consider maybe in another PR, though, if it is too complicated to change now.

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a preferred way to handle empty packets? i.e. in that case, should the code return a completely empty list? Or a list with dicts that have all the expected fields but are empty?


# TODO: calculate rates
# This will be done in codice.codice_l1b
Expand All @@ -41,5 +50,39 @@ def process_codice(dataset: xr.Dataset) -> list[dict]:
# This will be done in codice.codice_l2

# TODO: calculate the public data products
# This will be done in this module

return codice_data
# Create mock dataset for I-ALiRT SIT
# TODO: Once I-ALiRT test data is acquired that actually has data in it,
# we should be able to properly populate the I-ALiRT data, but for
# now, just create lists of dicts with FILLVALs
cod_lo_data = []
cod_hi_data = []

for epoch in range(len(dataset.epoch.data)):
sc_sclk_sec = dataset.sc_sclk_sec.data[epoch]
sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch]
met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256)
utc = met_to_utc(met).split(".")[0]
ttj2000ns = int(met_to_ttj2000ns(met))

epoch_data = {
"apid": int(dataset.pkt_apid[epoch].data),
"met": met,
"utc": utc,
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
"utc": utc,
"met_to_utc": utc,

"ttj2000ns": ttj2000ns,
}

# Add in CoDICE-Lo specific data
cod_lo_epoch_data = epoch_data.copy()
for field in constants.CODICE_LO_IAL_DATA_FIELDS:
cod_lo_epoch_data[f"codicelo_{field}"] = -1.0e31
Comment thread
bourque marked this conversation as resolved.
Outdated
Comment thread
bourque marked this conversation as resolved.
Outdated
Comment thread
bourque marked this conversation as resolved.
Outdated
cod_lo_data.append(cod_lo_epoch_data)

# Add in CoDICE-Hi specific data
cod_hi_epoch_data = epoch_data.copy()
for field in constants.CODICE_HI_IAL_DATA_FIELDS:
cod_hi_epoch_data[f"codicehi_{field}"] = -1.0e31
cod_hi_data.append(cod_hi_epoch_data)

return cod_lo_data, cod_hi_data
Loading
Loading