-
Notifications
You must be signed in to change notification settings - Fork 33
Changes to CoDICE I-ALiRT processing in preparation for I-ALiRT SIT #1832
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 6 commits
6c10cc8
a76121e
40eb017
103b8ef
4f04699
0d3e0d3
ef1ab2e
547c41e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -674,7 +674,9 @@ def set_data_product_config(self, apid: int, dataset: xr.Dataset) -> None: | |||||
| self.cdf_attrs.add_instrument_variable_attrs("codice", "l1a") | ||||||
|
|
||||||
|
|
||||||
| def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[bytearray]: | ||||||
| def group_ialirt_data( | ||||||
| packets: xr.Dataset, data_field_range: range, prefix: str | ||||||
| ) -> list[bytearray]: | ||||||
| """ | ||||||
| Group together the individual I-ALiRT data fields. | ||||||
|
|
||||||
|
|
@@ -684,6 +686,8 @@ def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[byte | |||||
| The dataset containing the I-ALiRT data packets. | ||||||
| data_field_range : range | ||||||
| The range of the individual data fields (15 for lo, 6 for hi). | ||||||
| prefix : str | ||||||
| The prefix used to index the data (i.e. ``cod_lo`` or ``cod_hi``). | ||||||
|
|
||||||
| Returns | ||||||
| ------- | ||||||
|
|
@@ -693,14 +697,28 @@ def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[byte | |||||
| current_data_stream = bytearray() | ||||||
| grouped_data = [] | ||||||
|
|
||||||
| # Workaround to get this function working for both I-ALiRT spacecraft | ||||||
| # data and CoDICE-specific I-ALiRT test data from Joey | ||||||
| # TODO: Once CoDICE I-ALiRT processing is more established, we can probably | ||||||
| # do away with processing the test data from Joey and just use the | ||||||
| # I-ALiRT data that is constructed closer to what we expect in-flight. | ||||||
| if hasattr(packets, "acquisition_time"): | ||||||
| time_key = "acquisition_time" | ||||||
| counter_key = "counter" | ||||||
| data_key = "data" | ||||||
| else: | ||||||
| time_key = f"{prefix}_acq" | ||||||
| counter_key = f"{prefix}_counter" | ||||||
| data_key = f"{prefix}_data" | ||||||
|
|
||||||
| # When a counter value of 255 is encountered, this signifies the | ||||||
| # end of the data stream | ||||||
| for packet_num in range(0, len(packets.acquisition_time.data)): | ||||||
| counter = packets.counter.data[packet_num] | ||||||
| for packet_num in range(0, len(packets[time_key].data)): | ||||||
| counter = packets[counter_key].data[packet_num] | ||||||
| if counter != 255: | ||||||
| for field in data_field_range: | ||||||
| current_data_stream.extend( | ||||||
| bytearray([packets[f"data_{field:02}"].data[packet_num]]) | ||||||
| bytearray([packets[f"{data_key}_{field:02}"].data[packet_num]]) | ||||||
| ) | ||||||
| else: | ||||||
| # At this point, if there are data, the data stream is ready | ||||||
|
|
@@ -1003,72 +1021,79 @@ def create_ialirt_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset: | |||||
| # See sections 10.4.1 and 10.4.2 in the algorithm document | ||||||
| if apid == CODICEAPID.COD_LO_IAL: | ||||||
| data_field_range = range(0, 15) | ||||||
| prefix = "cod_lo" | ||||||
| elif apid == CODICEAPID.COD_HI_IAL: | ||||||
| data_field_range = range(0, 5) | ||||||
| prefix = "cod_hi" | ||||||
|
|
||||||
| # Group together packets of I-ALiRT data to form complete data sets | ||||||
| grouped_data = group_ialirt_data(packets, data_field_range) | ||||||
|
|
||||||
| # Process each group to get the science data and corresponding metadata | ||||||
| science_values, metadata_values = process_ialirt_data_streams(grouped_data) | ||||||
|
|
||||||
| # How data are processed is different for lo-ialirt and hi-ialirt | ||||||
| if apid == CODICEAPID.COD_HI_IAL: | ||||||
| # Set some necessary values and process as a binned dataset similar to | ||||||
| # a hi-omni data product | ||||||
| metadata_for_processing = [ | ||||||
| "table_id", | ||||||
| "plan_id", | ||||||
| "plan_step", | ||||||
| "view_id", | ||||||
| "spin_period", | ||||||
| "suspect", | ||||||
| ] | ||||||
| for var in metadata_for_processing: | ||||||
| packets[var] = metadata_values[var.upper()] | ||||||
| dataset = create_binned_dataset(apid, packets, science_values) | ||||||
|
|
||||||
| elif apid == CODICEAPID.COD_LO_IAL: | ||||||
| # Create a nominal instance of the pipeline and process similar to a | ||||||
| # lo-sw-species data product | ||||||
| pipeline = CoDICEL1aPipeline( | ||||||
| metadata_values["TABLE_ID"][0], | ||||||
| metadata_values["PLAN_ID"][0], | ||||||
| metadata_values["PLAN_STEP"][0], | ||||||
| metadata_values["VIEW_ID"][0], | ||||||
| ) | ||||||
| pipeline.set_data_product_config(apid, packets) | ||||||
| pipeline.decompress_data(science_values) | ||||||
| pipeline.reshape_data() | ||||||
|
|
||||||
| # The calculate_epoch_values method needs acq_start_seconds and | ||||||
| # acq_start_subseconds attributes on the dataset | ||||||
| pipeline.dataset["acq_start_seconds"] = ( | ||||||
| "_", | ||||||
| metadata_values["ACQ_START_SECONDS"], | ||||||
| ) | ||||||
| pipeline.dataset["acq_start_subseconds"] = ( | ||||||
| "_", | ||||||
| metadata_values["ACQ_START_SUBSECONDS"], | ||||||
| ) | ||||||
| grouped_data = group_ialirt_data(packets, data_field_range, prefix) | ||||||
|
|
||||||
| if grouped_data: | ||||||
| # Process each group to get the science data and corresponding metadata | ||||||
| science_values, metadata_values = process_ialirt_data_streams(grouped_data) | ||||||
|
|
||||||
| # How data are processed is different for lo-ialirt and hi-ialirt | ||||||
| if apid == CODICEAPID.COD_HI_IAL: | ||||||
| # Set some necessary values and process as a binned dataset similar to | ||||||
| # a hi-omni data product | ||||||
| metadata_for_processing = [ | ||||||
| "table_id", | ||||||
| "plan_id", | ||||||
| "plan_step", | ||||||
| "view_id", | ||||||
| "spin_period", | ||||||
| "suspect", | ||||||
| ] | ||||||
| for var in metadata_for_processing: | ||||||
| packets[var] = metadata_values[var.upper()] | ||||||
| dataset = create_binned_dataset(apid, packets, science_values) | ||||||
|
|
||||||
| elif apid == CODICEAPID.COD_LO_IAL: | ||||||
| # Create a nominal instance of the pipeline and process similar to a | ||||||
| # lo-sw-species data product | ||||||
| pipeline = CoDICEL1aPipeline( | ||||||
| metadata_values["TABLE_ID"][0], | ||||||
| metadata_values["PLAN_ID"][0], | ||||||
| metadata_values["PLAN_STEP"][0], | ||||||
| metadata_values["VIEW_ID"][0], | ||||||
| ) | ||||||
| pipeline.set_data_product_config(apid, packets) | ||||||
| pipeline.decompress_data(science_values) | ||||||
| pipeline.reshape_data() | ||||||
|
Comment on lines
-1010
to
+1063
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Nothing much is changing here, I just put this all in an |
||||||
|
|
||||||
| pipeline.define_coordinates() | ||||||
| # The calculate_epoch_values method needs acq_start_seconds and | ||||||
| # acq_start_subseconds attributes on the dataset | ||||||
| pipeline.dataset["acq_start_seconds"] = ( | ||||||
| "_", | ||||||
| metadata_values["ACQ_START_SECONDS"], | ||||||
| ) | ||||||
| pipeline.dataset["acq_start_subseconds"] = ( | ||||||
| "_", | ||||||
| metadata_values["ACQ_START_SUBSECONDS"], | ||||||
| ) | ||||||
|
|
||||||
| # The dataset also needs the metadata that will be carried through | ||||||
| # to the final data product | ||||||
| for field in [ | ||||||
| "spin_period", | ||||||
| "suspect", | ||||||
| "st_bias_gain_mode", | ||||||
| "sw_bias_gain_mode", | ||||||
| "rgfo_half_spin", | ||||||
| "nso_half_spin", | ||||||
| ]: | ||||||
| pipeline.dataset[field] = ("_", metadata_values[field.upper()]) | ||||||
| pipeline.define_coordinates() | ||||||
|
|
||||||
| dataset = pipeline.define_data_variables() | ||||||
| # The dataset also needs the metadata that will be carried through | ||||||
| # to the final data product | ||||||
| for field in [ | ||||||
| "spin_period", | ||||||
| "suspect", | ||||||
| "st_bias_gain_mode", | ||||||
| "sw_bias_gain_mode", | ||||||
| "rgfo_half_spin", | ||||||
| "nso_half_spin", | ||||||
| ]: | ||||||
| pipeline.dataset[field] = ("_", metadata_values[field.upper()]) | ||||||
|
|
||||||
| return dataset | ||||||
| dataset = pipeline.define_data_variables() | ||||||
|
|
||||||
| return dataset | ||||||
|
|
||||||
| else: | ||||||
| logger.warning("No I-ALiRT data found") | ||||||
| return None | ||||||
|
||||||
| return None | |
| return xr.Dataset() |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -1,15 +1,20 @@ | ||||||
| """Functions to support I-ALiRT CoDICE processing.""" | ||||||
|
|
||||||
| import logging | ||||||
| from typing import Any | ||||||
|
|
||||||
| import xarray as xr | ||||||
|
|
||||||
| from imap_processing.codice.codice_l1a import create_ialirt_dataset | ||||||
| from imap_processing.codice import constants | ||||||
| from imap_processing.ialirt.utils.time import calculate_time | ||||||
| from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc | ||||||
|
|
||||||
| logger = logging.getLogger(__name__) | ||||||
|
|
||||||
|
|
||||||
| def process_codice(dataset: xr.Dataset) -> list[dict]: | ||||||
| def process_codice( | ||||||
| dataset: xr.Dataset, | ||||||
| ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: | ||||||
| """ | ||||||
| Create final data products. | ||||||
|
|
||||||
|
|
@@ -31,8 +36,12 @@ def process_codice(dataset: xr.Dataset) -> list[dict]: | |||||
| - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document) | ||||||
| - Calculate the public data products | ||||||
| """ | ||||||
| apid = dataset.pkt_apid.data[0] | ||||||
| codice_data = create_ialirt_dataset(apid, dataset) | ||||||
| # For I-ALiRT SIT, the test data being used has all zeros and thus no | ||||||
| # groups can be found, so there is no data to process | ||||||
| # TODO: Once I-ALiRT test data is acquired that actually has data in it, | ||||||
| # this can be turned back on | ||||||
| # codicelo_data = create_ialirt_dataset(CODICEAPID.COD_LO_IAL, dataset) | ||||||
| # codicehi_data = create_ialirt_dataset(CODICEAPID.COD_HI_IAL, dataset) | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Right after launch we will immediately begin to receive I-ALiRT packets that are empty. So we might have this scenario in real-life. Something to consider maybe in another PR, though, if it is too complicated to change now.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is there a preferred way to handle empty packets? i.e. in that case, should the code return a completely empty list? Or a list with dicts that have all the expected fields but are empty? |
||||||
|
|
||||||
| # TODO: calculate rates | ||||||
| # This will be done in codice.codice_l1b | ||||||
|
|
@@ -41,5 +50,39 @@ def process_codice(dataset: xr.Dataset) -> list[dict]: | |||||
| # This will be done in codice.codice_l2 | ||||||
|
|
||||||
| # TODO: calculate the public data products | ||||||
| # This will be done in this module | ||||||
|
|
||||||
| return codice_data | ||||||
| # Create mock dataset for I-ALiRT SIT | ||||||
| # TODO: Once I-ALiRT test data is acquired that actually has data in it, | ||||||
| # we should be able to properly populate the I-ALiRT data, but for | ||||||
| # now, just create lists of dicts with FILLVALs | ||||||
| cod_lo_data = [] | ||||||
| cod_hi_data = [] | ||||||
|
|
||||||
| for epoch in range(len(dataset.epoch.data)): | ||||||
| sc_sclk_sec = dataset.sc_sclk_sec.data[epoch] | ||||||
| sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch] | ||||||
| met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256) | ||||||
| utc = met_to_utc(met).split(".")[0] | ||||||
| ttj2000ns = int(met_to_ttj2000ns(met)) | ||||||
|
|
||||||
| epoch_data = { | ||||||
| "apid": int(dataset.pkt_apid[epoch].data), | ||||||
| "met": met, | ||||||
| "utc": utc, | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||
| "ttj2000ns": ttj2000ns, | ||||||
| } | ||||||
|
|
||||||
| # Add in CoDICE-Lo specific data | ||||||
| cod_lo_epoch_data = epoch_data.copy() | ||||||
| for field in constants.CODICE_LO_IAL_DATA_FIELDS: | ||||||
| cod_lo_epoch_data[f"codicelo_{field}"] = -1.0e31 | ||||||
|
bourque marked this conversation as resolved.
Outdated
bourque marked this conversation as resolved.
Outdated
bourque marked this conversation as resolved.
Outdated
|
||||||
| cod_lo_data.append(cod_lo_epoch_data) | ||||||
|
|
||||||
| # Add in CoDICE-Hi specific data | ||||||
| cod_hi_epoch_data = epoch_data.copy() | ||||||
| for field in constants.CODICE_HI_IAL_DATA_FIELDS: | ||||||
| cod_hi_epoch_data[f"codicehi_{field}"] = -1.0e31 | ||||||
| cod_hi_data.append(cod_hi_epoch_data) | ||||||
|
|
||||||
| return cod_lo_data, cod_hi_data | ||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Keep in mind that each of these variables will be a single column in the database. So there will not be dimensions here. For SWE, for example, I have a separate variable for each energy level. Does this need to be broken down further?
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Good question/good point -- For this particular data variable, there are 4 dimensions:
epoch,energy(there are 15 levels),ssd_index(there are 4 of these), andspin_sector(there are 4 of these). So the dimensions of the data are (<# epochs>, 15, 4, 4).I am not sure how to break this down. Would this mean I need to make (
15*4*4=) 240 separate variables?