Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,26 @@ science_data:
Metadata field acq_duration is a 17-bit uint. Max value of uint17 is 131071.
Dividing max counts by acq_duration gave validmax

counts_stat_uncert:
CATDESC: Counts statistical uncertainty
FIELDNAM: Counts Stats Uncertainty
DEPEND_0: epoch
DEPEND_1: esa_step
DEPEND_2: spin_sector
DEPEND_3: cem_id
LABL_PTR_1: esa_step_label
LABL_PTR_2: spin_sector_label
LABL_PTR_3: cem_id_label
DISPLAY_TYPE: spectrogram
FILLVAL: -1.0000000E+31
FORMAT: E19.5
UNITS: counts
VALIDMIN: 0.0
VALIDMAX: 1.7976931348623157e+308 # TODO: find actual value
VAR_TYPE: data
SCALETYP: linear
DICT_KEY: SPASE>Particle>ParticleType:Electron,ParticleQuantity:Counts,Qualifier:Uncertainty

acquisition_time:
CATDESC: Acquisition time organized by voltage step and spin sector
DEPEND_0: epoch
Expand Down
39 changes: 39 additions & 0 deletions imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,45 @@ flux:
VAR_TYPE: data
DICT_KEY: SPASE>Particle>ParticleType:Electron,ParticleQuantity:NumberFlux,Qualifier:Array

psd_stat_uncert:
CATDESC: Phase space density statistical uncertainty
FIELDNAM: Phase Space Density Stats Uncertainty
DEPEND_0: epoch
DEPEND_1: esa_step
DEPEND_2: spin_sector
DEPEND_3: cem_id
LABL_PTR_1: esa_step_label
LABL_PTR_2: spin_sector_label
LABL_PTR_3: cem_id_label
DISPLAY_TYPE: spectrogram
FILLVAL: -1.0000000E+31
FORMAT: E19.5
UNITS: counts # TODO(review): verify — phase space density units are s^3/(cm^6 * ster) per swe_l2.py docstring, not counts
VALIDMIN: 0.0
VALIDMAX: 1.7976931348623157e+308 # TODO: find actual value
VAR_TYPE: data
SCALETYP: linear
DICT_KEY: SPASE>Particle>ParticleType:Electron,ParticleQuantity:Counts,Qualifier:Uncertainty

flux_stat_uncert:
CATDESC: Flux statistical uncertainty
FIELDNAM: Flux Stats Uncertainty
DEPEND_0: epoch
DEPEND_1: esa_step
DEPEND_2: spin_sector
DEPEND_3: cem_id
LABL_PTR_1: esa_step_label
LABL_PTR_2: spin_sector_label
LABL_PTR_3: cem_id_label
DISPLAY_TYPE: spectrogram
FILLVAL: -1.0000000E+31
FORMAT: E19.5
UNITS: counts # TODO(review): verify — flux uncertainty presumably shares flux units rather than counts
VALIDMIN: 0.0
VALIDMAX: 1.7976931348623157e+308 # TODO: find actual value
VAR_TYPE: data
SCALETYP: linear
DICT_KEY: SPASE>Particle>ParticleType:Electron,ParticleQuantity:Counts,Qualifier:Uncertainty

acquisition_time:
CATDESC: Acquisition time organized by ESA step and spin sector
Expand Down
9 changes: 9 additions & 0 deletions imap_processing/swe/l1b/swe_l1b.py
Original file line number Diff line number Diff line change
Expand Up @@ -761,6 +761,10 @@ def swe_l1b_science(dependencies: ProcessingInputCollection) -> xr.Dataset:

count_rate = convert_counts_to_rate(inflight_applied_count, acq_duration)

# Statistical uncertainty is sqrt(decompressed counts)
# TODO: Update this if SWE would like to include deadtime correction.
counts_stat_uncert = np.sqrt(populated_data["science_data"])

# Store ESA energies of full cycle for L2 purposes.
esa_energies = get_esa_energy_pattern(esa_lut_files[0])
# Repeat energies to be in the same shape as the science data
Expand Down Expand Up @@ -894,6 +898,11 @@ def swe_l1b_science(dependencies: ProcessingInputCollection) -> xr.Dataset:
dims=["epoch", "esa_step", "spin_sector", "cem_id"],
attrs=cdf_attrs.get_variable_attributes("science_data"),
)
science_dataset["counts_stat_uncert"] = xr.DataArray(
counts_stat_uncert,
dims=["epoch", "esa_step", "spin_sector", "cem_id"],
attrs=cdf_attrs.get_variable_attributes("counts_stat_uncert"),
)
science_dataset["acquisition_time"] = xr.DataArray(
acq_time,
dims=["epoch", "esa_step", "spin_sector"],
Expand Down
98 changes: 80 additions & 18 deletions imap_processing/swe/l2/swe_l2.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,11 @@
from imap_processing.swe.utils import swe_constants


def calculate_phase_space_density(l1b_dataset: xr.Dataset) -> npt.NDArray:
def calculate_phase_space_density(
data: np.ndarray, particle_energy_data: np.ndarray
) -> npt.NDArray:
"""
Convert counts to phase space density.
Convert counts or uncertainty data to phase space density.

Calculate phase space density is represented by this symbol, fv.
Its unit is s^3/ (cm^6 * ster).
Expand Down Expand Up @@ -49,27 +51,27 @@ def calculate_phase_space_density(l1b_dataset: xr.Dataset) -> npt.NDArray:

Parameters
----------
l1b_dataset : xarray.Dataset
The L1B dataset to process.
data : numpy.ndarray
The data to process. Two expected inputs are counts or uncertainty data.
particle_energy_data : numpy.ndarray
The energy values in eV. This is the energy values from the
"esa_energy" variable in the L1B dataset.

Returns
-------
phase_space_density : np.ndarray
Phase space density. We need to call this phase space density because
there will be density in L3 processing.
"""
# Get energy values.
particle_energy_data = l1b_dataset["esa_energy"].values

# Calculate phase space density using formula:
# 2 * (C/tau) / (G * 1.237e31 * eV^2)
# 2 * ((C/tau) or uncertainty data) / (G * 1.237e31 * eV^2)
# See doc string for more details.
density = (2 * l1b_dataset["science_data"]) / (
density = (2 * data) / (
swe_constants.GEOMETRIC_FACTORS[np.newaxis, np.newaxis, np.newaxis, :]
* swe_constants.VELOCITY_CONVERSION_FACTOR
* particle_energy_data[:, :, :, np.newaxis] ** 2
)
phase_space_density = density.data
phase_space_density = density
Comment thread
tech3371 marked this conversation as resolved.
Outdated

return phase_space_density

Expand Down Expand Up @@ -114,7 +116,7 @@ def calculate_flux(
Parameters
----------
phase_space_density : numpy.ndarray
The phase space density.
The phase space density of counts or uncertainty data.
esa_energy : numpy.ndarray
The energy values in eV.

Expand All @@ -132,7 +134,7 @@ def calculate_flux(


def put_data_into_angle_bins(
data: np.ndarray, angle_bin_indices: npt.NDArray[np.int_]
data: np.ndarray, angle_bin_indices: npt.NDArray[np.int_], is_unc: bool = False
) -> npt.NDArray:
"""
Put data in its angle bins.
Expand All @@ -142,10 +144,11 @@ def put_data_into_angle_bins(
full cycle, it assigns data to the corresponding angle bin
based on the provided indices.

Since multiple data points may fall into the same angle bin,
the function accumulates values and computes the average across
all 7 CEMs, ensuring that each bin contains a representative
mean value while maintaining the 7 CEM structure.
Since multiple data points can fall into the same angle bin,
this function assigns each data point to its bin and sums
the values within that bin. If the data is uncertainty data,
it computes the combined uncertainty for the bin; otherwise,
it computes the average.

Parameters
----------
Expand All @@ -155,6 +158,10 @@ def put_data_into_angle_bins(
angle_bin_indices : numpy.ndarray
Indices of angle bins to put data in. Shape:
(full_cycle_data, N_ESA_STEPS, N_ANGLE_BINS).
is_unc : bool
Whether data is uncertainty data or not. If yes, sqrt(sum(data**2)).
Comment thread
tech3371 marked this conversation as resolved.
Outdated
Otherwise, compute the mean of the data.
Defaults to False.

Returns
-------
Expand All @@ -177,7 +184,29 @@ def put_data_into_angle_bins(
time_indices = np.arange(data.shape[0])[:, None, None]
energy_indices = np.arange(swe_constants.N_ESA_STEPS)[None, :, None]

# Use np.add.at() to accumulate values into bins
if is_unc:
Comment thread
tech3371 marked this conversation as resolved.
Outdated
# Calculate the combined uncertainty of the data in each bin.
# Per SWE instruction:
# At L1B, 'data' is the result of sqrt(counts). Now in L2, combine
# the uncertainty data using this formula:
# sqrt(
# sum(
# (unc_1) ** 2 + (unc_2) ** 2 + ... + (unc_n) ** 2
# )
# )
# TODO: SWE wants to add a more refined formula based on spin data and
# counts uncertainty from it in the future.

# Use np.add.at() to put values into bins and add values in the bins into one.
# Here, we are applying power of 2 to each data point before summing them.
np.add.at(
binned_data,
(time_indices, energy_indices, angle_bin_indices),
data**2,
)
return np.sqrt(binned_data)

# Use np.add.at() to put values into bins and add values in the bins into one.
np.add.at(binned_data, (time_indices, energy_indices, angle_bin_indices), data)

# Count occurrences in each bin to compute the mean.
Expand Down Expand Up @@ -343,7 +372,9 @@ def swe_l2(l1b_dataset: xr.Dataset) -> xr.Dataset:
# Calculate phase space density and flux. Store data in shape
# (epoch, esa_step, spin_sector, cem_id). This is for L3 purposes.
############################################################
phase_space_density = calculate_phase_space_density(l1b_dataset)
phase_space_density = calculate_phase_space_density(
l1b_dataset["science_data"].data, l1b_dataset["esa_energy"].data
)
dataset["phase_space_density_spin_sector"] = xr.DataArray(
phase_space_density,
name="phase_space_density_spin_sector",
Expand Down Expand Up @@ -419,4 +450,35 @@ def swe_l2(l1b_dataset: xr.Dataset) -> xr.Dataset:
attrs=cdf_attributes.get_variable_attributes("phase_space_density"),
)

#######################################################
# Calculate flux and phase space density of uncertainty data.
# Put uncertainty data in its angle bins.
#######################################################
# Calculate phase space density for uncertainty data.
phase_space_density_uncert = calculate_phase_space_density(
l1b_dataset["counts_stat_uncert"].data, l1b_dataset["esa_energy"].data
)
# Put uncertainty data into its spin angle bins and calculate new uncertainty
phase_space_density_uncert = put_data_into_angle_bins(
phase_space_density_uncert, spin_angle_bins_indices, is_unc=True
)
dataset["psd_stat_uncert"] = xr.DataArray(
phase_space_density_uncert,
name="psd_stat_uncert",
dims=["epoch", "esa_step", "spin_sector", "cem_id"],
attrs=cdf_attributes.get_variable_attributes("psd_stat_uncert"),
)
# Calculate flux for uncertainty data.
flux_uncert = calculate_flux(
phase_space_density_uncert, l1b_dataset["esa_energy"].data
)
flux_uncert = put_data_into_angle_bins(
flux_uncert, spin_angle_bins_indices, is_unc=True
)
dataset["flux_stat_uncert"] = xr.DataArray(
flux_uncert,
name="flux_stat_uncert",
dims=["epoch", "esa_step", "spin_sector", "cem_id"],
attrs=cdf_attributes.get_variable_attributes("flux_stat_uncert"),
)
return dataset
6 changes: 4 additions & 2 deletions imap_processing/tests/swe/test_swe_l2.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,9 @@ def test_calculate_phase_space_density():
),
}
)
phase_space_density = calculate_phase_space_density(l1b_dataset)
phase_space_density = calculate_phase_space_density(
l1b_dataset["science_data"].data, l1b_dataset["esa_energy"].data
)
assert phase_space_density.shape == (
total_sweeps,
swe_constants.N_ESA_STEPS,
Expand All @@ -83,7 +85,7 @@ def test_calculate_phase_space_density():
),
expected_calculated_density,
)
np.testing.assert_array_equal(phase_space_density[0].data, expected_density)
np.testing.assert_array_equal(phase_space_density[0], expected_density)


def test_calculate_flux():
Expand Down