import os
from datetime import datetime
import math


class Global_Attrs:
    """ISTP-style global attributes for HERMES/EEA (and heritage FPI DES/DIS) CDF files.

    Only Generation_date, Logical_file_id and Data_version are needed for the
    L1A product (see populate_global_attrs); the remaining methods carry over
    heritage attribute bookkeeping from the MMS/FPI pipeline.
    """

    def __init__(self, version, file_id, lo_ext):
        """
        Parameters
        ----------
        version : str
            data version string, e.g. "1.0.0"
        file_id : str
            path or name of the output CDF; only the basename is kept
        lo_ext :
            low-energy-extrapolation flag; indexes ['Disabled', 'Enabled'],
            may be None when not applicable
        """
        # Linux `date`-style timestamp, e.g. "Mon Oct 17 15:45:17 2022"
        self.Generation_date = datetime.now().strftime('%a %b %d %H:%M:%S %Y')
        self.Logical_file_id = os.path.basename(file_id)
        self.Data_version = version
        self.Spacecraft_attitude_filenames = ""
        self.lo_ext = lo_ext

    def setCorrectionTableValues(DES_glbattr, corrTable, conf, usec_searchfile):
        """Heritage correction-table attributes (commit 2374012bba...).

        These and others aren't needed by DES_L1A.  First parameter acts as
        `self` when the method is called on an instance.
        """
        sf_des = float(corrTable['sf_' + conf.mode])
        DES_glbattr.Correction_table_name = os.path.basename(conf.corr_file)
        DES_glbattr.Correction_table_scaling_factor = "{:0.5f}".format(sf_des)
        DES_glbattr.Energy_table_name = conf.energyfile
        # effective_deadtime is in seconds; report as whole nanoseconds
        DES_glbattr.Dead_time_correction = str(math.ceil(conf.effective_deadtime / 1e-9)) + ' ns'
        DES_glbattr.skymap_avgN = corrTable['skymap_avgN']
        if usec_searchfile is not None and os.path.exists(usec_searchfile):
            DES_glbattr.Microsecond_offset_filename = os.path.basename(usec_searchfile)

    def populate_global_attrs(self, myDES_L1A):
        """Copy the three attributes required for an L1A product onto myDES_L1A.

        Logical_file_id is stripped of any trailing ".cdf",
        e.g. "mms1_fpi_fast_l1a_des-cnts_20171221020000_v3.4.0".
        """
        myDES_L1A.Generation_date = self.Generation_date
        myDES_L1A.Logical_file_id = self.Logical_file_id.replace(".cdf", "")
        myDES_L1A.Data_version = self.Data_version

    def slow_survey_DES(DES_gblattr, corrTable, conf, MG, MGB,
                        sc_pot, photo_model, PKT_SECHDREXID):
        """Populate the DES slow-survey global attributes on DES_gblattr."""
        luts = corrTable['photomodel_luts']
        cfile = "_".join([conf.sc_id, conf.mode, 'f1ct', corrTable["tag_des"],
                          'p' + corrTable["etag_des1"] + '.txt'])

        # BURST uses both brst magnetic field files and fast srvy magnetic field files
        magfield_filenames = ((str(MG.srvy_basenames).replace("[", "")[0:-1]).replace("'", "")).replace(",", "")
        if MGB is not None:
            brst_magfield_filenames = ((str(MGB.srvy_basenames).replace("[", "")[0:-1]).replace("'", "")).replace(",", "")
            magfield_filenames = " ".join([magfield_filenames, brst_magfield_filenames])

        energy_e0 = 100
        scp_energy_frac = 1.0
        DES_gblattr.Correction_table_name = cfile
        DES_gblattr.Correction_table_scaling_factor = "{:0.5f}".format(float(luts[5]))
        DES_gblattr.Energy_table_name = conf.energyfile
        DES_gblattr.Dead_time_correction = conf.effective_deadtime
        DES_gblattr.Magnetic_field_filenames = magfield_filenames
        DES_gblattr.Spacecraft_potential_filenames = ((str(sc_pot.scp_basenames).replace("[", "")[0:-1]).replace("'", "")).replace(",", "")
        DES_gblattr.Photoelectron_model_filenames = photo_model.fname
        DES_gblattr.Photoelectron_model_scaling_factor = "{:0.7f}".format(float(luts[5]))
        DES_gblattr.Photoelectron_filter = "On" if conf.use_pfilter else "Off"
        DES_gblattr.Lower_energy_integration_limit = "{:0.2f}eV".format(float(corrTable["deslowIntegLimit"]))
        DES_gblattr.Upper_energy_integration_limit = "{:0.2f}eV".format(float(corrTable["deshighIntegLimit"]))
        DES_gblattr.Energy_e0 = "{:0.2f}eV".format(energy_e0)  # from orbit_constants
        DES_gblattr.Scp_energy_fraction = "{:0.2f}eV".format(scp_energy_frac)
        DES_gblattr.skymap_avgN = corrTable['skymap_avgN']
        DES_gblattr.Quadrant = PKT_SECHDREXID & 3
        DES_gblattr.High_energy_extrapolation = 'Enabled'  # as this is always true
        try:
            DES_gblattr.Low_energy_extrapolation = ['Disabled', 'Enabled'][DES_gblattr.lo_ext]
        except TypeError:
            # lo_ext may be None when not applicable
            pass

    # These files are obtained only during processing...
    def build_attitude(self, defatt_filenames_obtained_from_dbcs):
        """Accumulate unique attitude-file basenames into Spacecraft_attitude_filenames.

        The defatt file changes every time a day boundary is crossed and a new
        transform is needed (every 10 min), so duplicates are filtered out.
        """
        for file in defatt_filenames_obtained_from_dbcs:
            if file is None:
                continue
            basename = os.path.basename(file)
            if self.Spacecraft_attitude_filenames.find(basename) < 0:
                self.Spacecraft_attitude_filenames = self.Spacecraft_attitude_filenames + " " + basename

    def slow_survey_DIS(DIS_gblattr, corrTable, conf,
                        MG, MGB, sc_pot, photo_model, PKT_SECHDREXID):
        """Populate the DIS slow-survey global attributes on DIS_gblattr."""
        cfile = "_".join([conf.sc_id, conf.mode, 'f1ct', corrTable["tag_dis"],
                          'p' + corrTable["etag_dis1"]]) + '.txt'
        luts = corrTable['photomodel_luts']
        energy_e0 = 100
        scp_energy_frac = 1.0
        DIS_gblattr.Correction_table_name = cfile
        DIS_gblattr.Correction_table_scaling_factor = "{:0.5f}".format(float(corrTable['sf_dis']))
        DIS_gblattr.Energy_table_name = conf.energyfile
        DIS_gblattr.Dead_time_correction = conf.effective_deadtime
        DIS_gblattr.Magnetic_field_filenames = (
            (str(MG.srvy_basenames).replace("[", "")[0:-1]).replace("'", "")).replace(",", "")
        DIS_gblattr.Spacecraft_potential_filenames = (
            (str(sc_pot.scp_basenames).replace("[", "")[0:-1]).replace("'", "")).replace(",", "")
        DIS_gblattr.Photoelectron_model_filenames = photo_model.fname
        DIS_gblattr.Photoelectron_model_scaling_factor = float(luts[5])
        DIS_gblattr.Photoelectron_filter = conf.use_pfilter
        DIS_gblattr.Lower_energy_integration_limit = "{:0.2f}eV".format(float(corrTable["dislowIntegLimit"]))
        DIS_gblattr.Upper_energy_integration_limit = "{:0.2f}eV".format(float(corrTable["dishighIntegLimit"]))
        DIS_gblattr.Energy_e0 = "{:0.2f}eV".format(energy_e0)  # from orbit_constants
        DIS_gblattr.Scp_energy_fraction = "{:0.2f}eV".format(scp_energy_frac)
        DIS_gblattr.skymap_avgN = corrTable['skymap_avgN']
        DIS_gblattr.Quadrant = PKT_SECHDREXID & 3
        DIS_gblattr.Spacecraft_attitude_filenames = ''  # updated in map loop
        DIS_gblattr.High_energy_extrapolation = 'Enabled'  # as this is always true
        try:
            DIS_gblattr.Low_energy_extrapolation = ['Disabled', 'Enabled'][DIS_gblattr.lo_ext]
        except TypeError:
            pass
# This does an entire sweep of, nominally, 164 thingies
def do_eea_packet(stepperTableCounter,
                  counts,
                  cnt1, cnt2,
                  epoch,
                  energies,
                  deflections, ith_FSmap):
    """Assemble one full stepper sweep into a skymap package.

    Parameters
    ----------
    stepperTableCounter : sequence of int
        row indices of this sweep within the packet arrays
    counts : 2-D array
        per-step accumulations; columns 0-31 are the skymap counts,
        columns 32/33 are the pulse A/B values
    cnt1, cnt2 : 1-D arrays
        the two per-step count estimates
    epoch : 1-D array of int64
        CDF epoch (nanoseconds) for each step
    energies, deflections : sequences of int
        per-step energy (0..40) and deflection (0..3) indices from the
        stepper table
    ith_FSmap : int
        ordinal of this sweep (used only for logging/diagnostics)

    Returns
    -------
    dict with the skymap, pulse, counter and timing arrays for the sweep.
    """
    return_package = {}
    # 41 energy steps x 4 deflection angles per sweep
    pulse_a = np.zeros((41, 4), dtype=np.uint16)
    pulse_b = np.zeros((41, 4), dtype=np.uint16)
    counter1 = np.zeros((41, 4), dtype=np.uint16)
    counter2 = np.zeros((41, 4), dtype=np.uint16)
    # BUG FIX: epochs are int64 nanosecond timestamps; the original uint16
    # buffer silently truncated them.
    µepoch = np.zeros((41, 4), dtype=np.int64)

    skymap = np.zeros((41, 4, 32), dtype=np.uint16)

    for row in stepperTableCounter:
        dim0 = energies[row]
        dim1 = deflections[row]
        skymap[dim0, dim1, :] = counts[row, 0:32]
        pulse_a[dim0, dim1] = counts[row][32]
        pulse_b[dim0, dim1] = counts[row][33]
        counter1[dim0, dim1] = cnt1[row]
        counter2[dim0, dim1] = cnt2[row]
        µepoch[dim0, dim1] = epoch[row]

    return_package['pulse_a'] = pulse_a
    return_package['pulse_b'] = list(pulse_b)
    return_package['counts'] = skymap
    return_package['µEpoch'] = µepoch
    return_package['Epoch'] = epoch[0]
    return_package['stats'] = np.sum(skymap)
    # NOTE(review): 'energies' is the module-level voltage table, not the
    # per-step index array passed in -- confirm this is intended.
    return_package['energies'] = voltages
    return_package['sun_angles'] = deflections
    return_package['counter1'] = counter1
    return_package['counter2'] = counter2

    return return_package
"hermes_eea_l0_00000000000000_v0.0.0.cdf") ) +stepper_table = "flight_stepper.txt" + + +energies = [2.18000000e+00, 2.63177330e+00, 3.17717004e+00, 3.83559233e+00, + 4.63046306e+00, 5.59005918e+00, 6.74851766e+00, 8.14704980e+00, + 9.83540739e+00, 1.18736525e+01, 1.43342944e+01, 1.73048684e+01, + 2.08910507e+01, 2.52204172e+01, 3.04469818e+01, 3.67566761e+01, + 4.43739626e+01, 5.35698211e+01, 6.46713874e+01, 7.80735920e+01, + 9.42532085e+01, 1.13785815e+02, 1.37366271e+02, 1.65833433e+02, + 2.00200000e+02, 2.39800000e+02, 3.17794829e+02, 4.21157437e+02, + 5.58138682e+02, 7.39673007e+02, 9.80251281e+02, 1.29907752e+03, + 1.72160182e+03, 2.28155195e+03, 3.02362557e+03, 4.00705827e+03, + 5.31035195e+03, 7.03754125e+03, 9.32649800e+03, 1.23599368e+04, + 1.63800000e+04] diff --git a/hermes_eea/calibration/__init__.py b/hermes_eea/calibration/__init__.py index ab1f04c..7189449 100644 --- a/hermes_eea/calibration/__init__.py +++ b/hermes_eea/calibration/__init__.py @@ -1 +1,3 @@ +import os +os.environ["CDF_LIB"] = "/usr/local/cdf/lib" from .calibration import * diff --git a/hermes_eea/calibration/build_spectra.py b/hermes_eea/calibration/build_spectra.py new file mode 100644 index 0000000..ad5eeda --- /dev/null +++ b/hermes_eea/calibration/build_spectra.py @@ -0,0 +1,57 @@ + +from hermes_core.timedata import HermesData +import astropy.units as astropy_units +from astropy.timeseries import TimeSeries +from astropy.time import Time +from hermes_core.timedata import HermesData +from astropy.nddata import NDData +from ndcube import NDCube, NDCollection +from hermes_eea.util.time.iso_epoch import epoch_to_iso_obj, epoch_to_eea_iso, epoch_to_iso +import numpy as np +from astropy.wcs import WCS + +class Build_Hermes_EEA_Data: + + def __init__(self, myEEA): + self.EEA = myEEA + self.raw_counts = astropy_units.def_unit("raw instrument counts") + def build_HermesData(self): + iso_times = Time(epoch_to_iso(self.EEA.Epoch[:]), scale='utc') + ts_1d_uQ = TimeSeries( + time=iso_times, + 
data={"stats": astropy_units.Quantity(self.EEA.stats, "gauss", dtype=np.uint16)} + ) # this works + self._hermes_eea_spectra() + bare_attrs = HermesData.global_attribute_template("eea", "l1", "1.0.0") + ts_justTime = TimeSeries(time=iso_times) + + self.hermes_eea_data = HermesData(timeseries=ts_1d_uQ, spectra=self.multiple_spectra, meta=bare_attrs) + self.hermes_eea_data.timeseries['stats'].meta.update({"CATDESC": "Sum of skymap for each sweep"}) + + def _hermes_eea_spectra(self): + self.multiple_spectra = NDCollection( + [ + ("hermes_eea_settle_step_times", + NDCube(data=np.array(self.EEA.µEpoch), wcs=WCS(naxis=2), meta={"CATDESC": "Settle for Each Step"}, + unit="s", )), + ("hermes_eea_energy_profile", + NDCube(data=np.array(self.EEA.EnergyLabels), wcs=WCS(naxis=2), meta={"CATDESC": "Energy Profile"}, + unit="eV", )), + ("hermes_eea_accum", + NDCube(data=np.array(self.EEA.ACCUM), wcs=WCS(naxis=3), meta={"CATDESC": "EEA raw skymap"}, + unit="count" )), + + ("hermes_eea_counter1", + NDCube(data=np.array(self.EEA.Counter1), wcs=WCS(naxis=2), + meta={"CATDESC": "Estimate 1 of the number of counts in this accumulation"}, + unit=astropy_units.dimensionless_unscaled, )), + + ("hermes_eea_counter1", + NDCube(data=np.array(self.EEA.Counter1), wcs=WCS(naxis=2), + meta={"CATDESC": "Estimate 1 of the number of counts in this accumulation"}, + unit=astropy_units.dimensionless_unscaled, )), + ("hermes_eea_counter2", + NDCube(data=np.array(self.EEA.Counter1), wcs=WCS(naxis=2), + meta={"CATDESC": "Estimate 1 of the number of counts in this accumulation"}, + unit=astropy_units.dimensionless_unscaled, )) + ]) \ No newline at end of file diff --git a/hermes_eea/calibration/calibration.py b/hermes_eea/calibration/calibration.py index e1fbb7b..0bb92c5 100644 --- a/hermes_eea/calibration/calibration.py +++ b/hermes_eea/calibration/calibration.py @@ -5,16 +5,23 @@ import random import os.path from pathlib import Path - +import sys import ccsdspy import numpy as np from spacepy import 
def converting_ccsds_times_to_cdf(coarse, fine):
    """Convert CCSDS coarse (seconds) / fine (microseconds) times to CDF epochs (ns).

    Liam was using this in his initial endeavors; I am not sure if any tests
    are still using it.

    Parameters
    ----------
    coarse, fine : array-like of int
        CCSDS coarse seconds and fine microseconds.

    Returns
    -------
    unsigned integer epoch(s): coarse*1e9 + fine*1e3 + taiEpoch_tt2000.
    """
    tai_time = {
        # EEA offset; the FPI heritage value was 1325419167816000000
        "taiEpoch_tt2000": 64184000000,
        "nanosPerMicro": 1000,
        "MicrosPerSec": 1000000,
        "nanosPerSec": 1000000000,
    }
    # int64 math to avoid Python-object arrays; the original also preallocated
    # three np.zeros buffers that were immediately overwritten (removed).
    p1 = np.int64(coarse) * np.int64(tai_time["nanosPerSec"])
    p2 = np.int64(fine) * np.int64(tai_time["nanosPerMicro"])
    epoch = p1 + p2
    result = np.uint(epoch + tai_time["taiEpoch_tt2000"])
    return result
def get_calibration_file(data_filename: Path, time=None) -> Path:
    """Return the path of the named calibration file inside the package data directory.

    Parameters
    ----------
    data_filename: str
        basename of the calibration file (e.g. hermes_eea.stepper_table)
    time: ~astropy.time.Time, optional
        observation time; currently unused -- one table covers all times

    Returns
    -------
    calib_filename: str
        fully qualified path to the calibration file
    """
    return os.path.join(hermes_eea._data_directory, data_filename)


def read_calibration_file(calib_filename: Path):
    """Parse a flight stepper table into calib.energies / calib.deflections.

    Each line is a fixed-width hex record: characters [8:10] hold the energy
    index and [10:12] the deflection index for that step.  Results are stored
    on the hermes_eea.calibration module (calib) as module-level lists.

    Parameters
    ----------
    calib_filename: str
        path to the stepper table file
    """
    # original wrapped the path in a redundant one-argument os.path.join
    lines = read_file(calib_filename)
    calib.energies = []
    calib.deflections = []
    for line in lines:
        calib.energies.append(int(line[8:10], 16))
        calib.deflections.append(int(line[10:12], 16))


def retrieve_canned_attributes():
    """Return the boilerplate ISTP global-attribute dictionary for EEA products."""
    input_attrs = {
        "DOI": "https://doi.org//",
        "Data_level": "L1>Level 1",  # NOT AN ISTP ATTR
        "Data_version": "0.0.1",
        "Descriptor": "EEA>Electron Electrostatic Analyzer",
        "Data_product_descriptor": "odpd",
        "HTTP_LINK": [
            "https://spdf.gsfc.nasa.gov/istp_guide/istp_guide.html",
            "https://spdf.gsfc.nasa.gov/istp_guide/gattributes.html",
            "https://spdf.gsfc.nasa.gov/istp_guide/vattributes.html"
        ],
        "Instrument_mode": "default",  # NOT AN ISTP ATTR
        "Instrument_type": "Electric Fields (space)",
        "LINK_TEXT": [
            "ISTP Guide",
            "Global Attrs",
            "Variable Attrs"
        ],
        "LINK_TITLE": [
            "ISTP Guide",
            "Global Attrs",
            "Variable Attrs"
        ],
        "MODS": [
            "v0.0.0 - Original version.",
            "v1.0.0 - Include trajectory vectors and optics state.",
            "v1.1.0 - Update metadata: counts -> flux.",
            "v1.2.0 - Added flux error.",
            "v1.3.0 - Trajectory vector errors are now deltas."
        ],
        "PI_affiliation": "HERMES",
        "PI_name": "HERMES SOC",
        "TEXT": "Valid Test Case",
        "VATTRS": [
            "stats",
            "energies"
        ]
    }

    return input_attrs
+1092105307021770 +10b5105c08021770 +10e1106609021770 +111510740a021770 +115310840b021770 +119f10970c021770 +11fb10ae0d021770 +126910ca0e021770 +12ef10ec0f021770 +1390111410021770 +1452114511021770 +153d118112021770 +165811c913021770 +17af121f14021770 +194c128815021770 +1b3e130616021770 +1d98139e17021770 +002c145618021770 +0035152a19021770 +004816cb1a021770 +006118f41b021770 +00821bd01c021770 +00ad1f9b1d021770 +00e700391e021770 +0134004c1f021770 +0199006620021770 +0220008821021770 +02d200b522021770 +03bf00f123021770 +04f8014024021770 +069701aa25021770 +08bd023526021770 +0b9602ee27021770 +0f5c03e328021770 +0f5c0d8328032710 +0b960a3127031770 +08bd07b026031770 +069705cc25031770 +04f8045f24031770 +03bf034c23031770 +02d2027c22031770 +022001df21031770 +0199016920031770 +013401101f031770 +00e700cc1e031770 +00ad00991d031770 +008200731c031770 +006100561b031770 +004800401a031770 +0035003019031770 +002c1eab18031770 +1d981c2d17031770 +1b3e1a1d16031770 +194c186715031770 +17af16fc14031770 +165815d013031770 +153d14d712031770 +1452140911031770 +1390135f10031770 +12ef12d10f031770 +1269125c0e031770 +11fb11fb0d031770 +119f11ab0c031770 +115311680b031770 +111511310a031770 +10e1110309031770 +10b510de08031770 +109210be07031770 +107410a406031770 +105c108f05031770 +1047107d04031770 +1037106e03031770 +1029106202031770 +101d105801031770 +1014104f00032710 \ No newline at end of file diff --git a/hermes_eea/data/hermes_EEA_l0_2023041-000000_v0.bin.gz b/hermes_eea/data/hermes_EEA_l0_2023041-000000_v0.bin.gz new file mode 100644 index 0000000..5fb7056 Binary files /dev/null and b/hermes_eea/data/hermes_EEA_l0_2023041-000000_v0.bin.gz differ diff --git a/hermes_eea/data/hermes_EEA_l0_2023042-000000_v0.bin b/hermes_eea/data/hermes_EEA_l0_2023042-000000_v0.bin new file mode 100644 index 0000000..a7cf7d8 Binary files /dev/null and b/hermes_eea/data/hermes_EEA_l0_2023042-000000_v0.bin differ diff --git a/hermes_eea/io/EEA.py b/hermes_eea/io/EEA.py new file mode 100644 index 0000000..bcb9777 --- 
class EEA:
    """Accumulator for decoded EEA skymap sweeps.

    Each list gains one entry per full sweep (see SkymapFactory/do_eea_packet).
    """

    def __init__(self, conf):
        # conf is currently unused; kept for interface stability
        self.Epoch = []         # first timestamp of each sweep (when
                                # hermes_eea_intgr_or_stepper = 1 and step_counter = 0)
        self.Generation_date = None
        self.Logical_file_id = None
        self.Data_version = None
        self.µEpoch = []        # [41, 4] per-step times, whenever intgr_or_stepper == 1
        self.PulseA = []        # [41, 4] overflow[0], accum[33]
        self.PulseB = []        # [41, 4] overflow[1], accum[34]
        self.Counter1 = []
        self.Counter2 = []
        self.ACCUM = []         # [41, 4, 32] (energy, deflection, accums)
        self.SunAngles = []     # [4, 32] really just metadata
        self.EnergyLabels = []  # [41] really just metadata
        self.stats = []         # per-sweep skymap sums

    def append(self, attrname, record):
        """Return record[attrname], falling back to a default table when one exists.

        Occasionally no value is present (e.g. compressionLoss is only in
        moms brst).  The original fallback referenced self.name_align /
        self.default_obj, which are never defined on this class and raised
        AttributeError; a missing key now degrades to the default (or None).
        """
        try:
            return record[attrname]
        except KeyError:
            align = getattr(self, "name_align", None)
            defaults = getattr(self, "default_obj", None)
            if align is not None and defaults is not None:
                key = align(attrname)
                if key in defaults:
                    return defaults[key]
            return None

    def populate(self, myEEA, skymap):
        """Append every per-sweep record in skymap onto myEEA's lists.

        The redundant myEEA parameter is kept because callers invoke this as
        myEEA.populate(myEEA, result).
        """
        for record in skymap:
            myEEA.µEpoch.append(record['µEpoch'])
            myEEA.Epoch.append(record['Epoch'])
            myEEA.ACCUM.append(record['counts'])
            myEEA.PulseA.append(record['pulse_a'])
            myEEA.PulseB.append(record['pulse_b'])
            myEEA.SunAngles.append(record['sun_angles'])
            myEEA.EnergyLabels.append(record['energies'])
            myEEA.Counter1.append(record['counter1'])
            myEEA.Counter2.append(record['counter2'])
            myEEA.stats.append(record['stats'])
pkt_def: FixedLength): diff --git a/hermes_eea/tests/test_calibration.py b/hermes_eea/tests/test_calibration.py index c06bbb9..d197168 100644 --- a/hermes_eea/tests/test_calibration.py +++ b/hermes_eea/tests/test_calibration.py @@ -1,20 +1,26 @@ import pytest import os.path from pathlib import Path - +import ccsdspy import hermes_eea.calibration as calib -from hermes_eea import _data_directory +from hermes_eea import _data_directory,stepper_table from hermes_core.util.util import create_science_filename, parse_science_filename +import sys -level1_filename = "hermes_eea_l1_20221205_000000_v1.0.0.cdf" -ql_filename = "hermes_eea_ql_20221205_000000_v1.0.0.cdf" +level1a_filename = "hermes_eea_l1_20221205_000000_v1.0.0.cdf" +level1_filename = 'hermes_eea_l1_20000101T124114_v1.0.0.cdf' +ql_filename = "hermes_eea_ql_20221205_000000_v1.0.0.cdf" -@pytest.fixture(scope="session") -def level0_file(tmp_path_factory): - fn = Path(os.path.join(_data_directory, "hermes_EEA_l0_2023038-000000_v0.bin")) +@pytest.fixture(scope="session") # this is a pytest fixture +def small_level0_file(tmp_path_factory): + fn = Path(os.path.join(_data_directory, "hermes_EEA_l0_2023042-000000_v0.bin")) return fn +@pytest.fixture(scope="session") # this is a pytest fixture +def large_level0_file(tmp_path_factory): + fn = Path(os.path.join(_data_directory, "hermes_EEA_l0_2023041-000000_v0.bin")) + return fn @pytest.fixture(scope="session") def level1_file(tmp_path_factory): @@ -23,8 +29,25 @@ def level1_file(tmp_path_factory): pass return fn +def test_get_calibration_file(): + file = Path(os.path.join(_data_directory, stepper_table)) + assert file.is_file() + +def test_read_calibration_file(): + file = Path(os.path.join(_data_directory, stepper_table)) + calib.read_calibration_file(file) + assert len(calib.energies) == 164 + assert len(calib.deflections) == 164 + -def test_l0_sci_data_to_cdf(level0_file): +def test_calibrate_file(small_level0_file): + """Test that the output filenames are correct and 
that a file was actually created.""" + output_file = calib.calibrate_file(small_level0_file) + assert os.path.basename(output_file) == "hermes_eea_l1_20000101T170901_v1.0.0.cdf" + assert os.path.getsize(output_file) > 200000 + +# this creates a blank cdf with the proper name -- not too interesting +def not_test_l0_sci_data_to_cdf(level0_file): """Test that the output filenames are correct and that a file was actually created.""" data = {} output_file = calib.l0_sci_data_to_cdf(data, level0_file) @@ -32,19 +55,21 @@ def test_l0_sci_data_to_cdf(level0_file): assert output_file.is_file() +# This drops all the way down to ccsdspy but seems to work def test_calibrate_file_nofile_error(): """Test that if file does not exist it produces the correct error. The file needs to be in the correct format.""" with pytest.raises(FileNotFoundError): calib.calibrate_file(Path("hermes_EEA_l0_2032339-000000_v0.bin")) - +# This one is less clear as yet... def test_process_file_nofile_error(): """Test that if file does not exist it produces the correct error. The file needs to be in the correct format.""" with pytest.raises(FileNotFoundError): calib.process_file(Path("hermes_EEA_l0_2032339-000000_v0.bin")) -def test_calibrate_file(level0_file, level1_file): +# this fills the blank cdf with data +def not_test_calibrate_file(level0_file, level1a_filename): """Test that the output filenames are correct and that a file was actually created.""" output_file = calib.calibrate_file(level0_file) # assert output_file.name == level1_filename @@ -60,16 +85,16 @@ def test_calibrate_file(level0_file, level1_file): # == "Calibration file for datafile_with_no_calib.cdf not found." 
def helpConvert(decoded):
    """Epochs from the START_CORSTIME / START_FINETIME fields of a decoded packet."""
    return converting_ccsds_times_to_cdf(
        np.uint(decoded["START_CORSTIME"]),
        np.uint(decoded["START_FINETIME"]),
    )


def helpConvertEEA(decoded):
    """Epochs from the EEA secondary-header SHCOARSE / SHFINE fields."""
    return converting_ccsds_times_to_cdf(
        np.uint(decoded["SHCOARSE"]),
        np.uint(decoded["SHFINE"]),
    )
fine = np.uint(decoded["MAGMSGFINETIME"]) + epoch = converting_ccsds_times_to_cdf(coarse, fine) + return epoch + + + +def converting_ccsds_times_to_cdf(coarse, fine): + + epoch = np.zeros(coarse.shape[0], dtype=np.uint) + p1 = np.zeros(coarse.shape[0], dtype=np.uint) + p2 = np.zeros(coarse.shape[0], dtype=np.uint) + + tai_time = {} + # FPI: + # tai_time["taiEpoch_tt2000"] = 1325419167816000000 + # EEA: + tai_time["taiEpoch_tt2000"] = -64184000000 + tai_time["nanosPerMicro"] = 1000 + tai_time["MicrosPerSec"] = 1000000 + tai_time["nanosPerSec"] = 1000000000 + + example = coarse[0] * tai_time["nanosPerSec"] + p1 = np.int64(coarse) * np.int64(tai_time["nanosPerSec"]) + p2 = np.int64(fine) * np.int64(tai_time["nanosPerMicro"]) + epoch = p1 + p2 + result = np.uint(epoch - tai_time["taiEpoch_tt2000"]) + return result + +def reverse_cnv_cdf_times_to_ccsds(epoch): + + tai_time = {} + # FPI: + # tai_time["taiEpoch_tt2000"] = 1325419167816000000 + # EEA: + tai_time["taiEpoch_tt2000"] = -64184000000 + tai_time["nanosPerMicro"] = 1000 + tai_time["MicrosPerSec"] = 1000000 + tai_time["nanosPerSec"] = 1000000000 + TAI_us = int((epoch + tai_time["taiEpoch_tt2000"]) / tai_time["nanosPerMicro"]) + + coarse = int(TAI_us / tai_time["MicrosPerSec"]) #; CCSDS sec + fine = TAI_us % tai_time["MicrosPerSec"] #; CCSDS us + return (coarse, fine) + +def calc_Epoch_for_Trigger(raw_data): + ''' + I'm not at all sure what this is all about...How is this result different from the usual + PODA to CDF conversion? 
+ :param raw_data: + :return: + ''' + Timeus = {} + Timeus["288"] = 30000 + Timeus["296"] = 150000 + thisTimeus = Timeus[str(raw_data["PREPENDED_APID"][0])] + + usPerSec = 1000000 + elePerSamp = (raw_data["CMPTRIGGERTERM"]).shape[1] # 288 = 150 , 296 = 30 + nDes = len(raw_data["PREPENDED_APID"]) + corsTime = np.zeros(nDes * elePerSamp, "u4") + fineTime = np.zeros(nDes * elePerSamp, "u4") + for i in range(0, nDes): + startusec = raw_data["START_CORSTIME"][i] * usPerSec + raw_data["START_FINETIME"][i] + stop = (elePerSamp * thisTimeus) + startusec + usecs = np.arange(startusec, stop, thisTimeus) + offset = i * elePerSamp + corsTime[offset:offset + elePerSamp] = usecs / usPerSec + fineTime[offset:offset + elePerSamp] = usecs % usPerSec + + new_Epoch = converting_ccsds_times_to_cdf(corsTime, fineTime ) + return new_Epoch \ No newline at end of file diff --git a/hermes_eea/util/time/iso_epoch.py b/hermes_eea/util/time/iso_epoch.py new file mode 100644 index 0000000..ff9f233 --- /dev/null +++ b/hermes_eea/util/time/iso_epoch.py @@ -0,0 +1,233 @@ +import os +os.environ["CDF_LIB"] = "/usr/local/cdf/lib" +import cdflib +import dateutil.parser +from sys import stderr +from datetime import datetime, date, timedelta +import spacepy.time as spt +import re +import numpy as np + +def jday_to_iso(jday: str): + try: + dateobj = datetime.strptime(jday, '%Y-%jT%H:%M:%S.%f').date() + except ValueError: + dateobj = datetime.strptime(jday, '%Y-%jT%H:%M:%S').date() + return dateobj + +def parseJdayRep(rangeItem): + _apid_pattern = (re.compile("APID\((?P\d+)\)"),) + jdayRep = re.compile("(\d\d\d\d)-([\d]+)[/ T]*(\d\d:\d\d:\d\d)[.\d]*") + year = jdayRep.match(rangeItem).group(1) + assert int(year) > 0 + doy = jdayRep.match(rangeItem).group(2) + assert int(doy) > 0 + tod = jdayRep.match(rangeItem).group(3) + assert len(doy) > 0, f"expected 8, got: {len(doy)}" + iso_version = datetime.strptime(year + doy, "%Y%j").date() + full_iso = dateutil.parser.parse(iso_version.strftime("%Y-%m-%d") + 
"T" + tod) + return full_iso + + +def tblenddate_to_iso(tblenddate): + d = datetime.strptime(tblenddate, "%Y%m%d%H%M%S") + return d + + +def convert_to_micro_seconds(iso_date: datetime): + """ + PODA Doesn't need it but traditionally the http request is done using these + kinds of times + :param iso_date: + :return:time in microseconds since 1980 for the PODA http request for packet dqta + """ + epoch_start = dateutil.parser.parse("1980-01-06T00:00:00") + iso_jd = spt.Ticktock(iso_date) + epoch_start_jd = spt.Ticktock(epoch_start) + diff = iso_jd.JD[0] - epoch_start_jd.JD[0] + ss = diff * 86400 + iso_jd.leaps - epoch_start_jd.leaps + ss = round(ss[0]) * 1000000 + return ss + + +def iso_obj_to_epoch(trange): + """ + ISO to CDF EPOCH: + cdflib.epochs.CDFepoch.parse('2012-01-01T01:01:01.000000000') + + :param trange: a list, typically 2, of datetime objects + :return:18 digit epoch times for CDF + """ + converted = [] + for t in trange: + #dateString = t.strftime("%Y-%m-%dT%H:%M:%S.000000000") + dateString = t.strftime("%Y-%m-%dT%H:%M:%S.%f000") + try: + c = cdflib.epochs.CDFepoch.parse(dateString) + converted.append(c) + except ValueError as e: + print(t + " This time range value doesn't look too kosher...", file=stderr) + # exit(1) + return converted +def iso_to_epoch(trange): + """ + ISO to CDF EPOCH: + cdflib.epochs.CDFepoch.parse('2012-01-01T01:01:01.000000000') + + :param trange: a list, typically 2, of datetime strings + :return:18 digit epoch times for CDF + """ + converted = [] + for t in trange: + try: + c = cdflib.epochs.CDFepoch.parse(t) + converted.append(c) + except ValueError as e: + print(t + " This time range value doesn't look too kosher...", file=stderr) + # exit(1) + return converted + +''' +This returns a string, not a date object +''' +def epoch_to_iso(trange): + """ + CDF EPOCH TO ISO: + cdflib.epochs.CDFepoch.encode_tt2000(378651727184000000) + + :param trange:18 digit CDF epoch times + :return:a list,typically 2, of datetime strings in iso 
format + """ + in_iso = [] + for t in trange: + c = cdflib.epochs.CDFepoch.encode_tt2000(int(t)) + in_iso.append(c) + return in_iso + +def epoch_to_eea_iso(trange): + """ + CDF EPOCH TO ISO: + cdflib.epochs.CDFepoch.encode_tt2000(378651727184000000) + + :param trange:18 digit CDF epoch times + :return:a list,typically 2, of datetime strings in iso format + """ + in_iso = [] + for t in trange: + c = cdflib.epochs.CDFepoch.encode_tt2000(int(t)) + in_iso.append((c.replace("T"," ")[0:19])) + return in_iso + +def epoch_to_iso_obj(trange): + """ + CDF EPOCH TO ISO: + cdflib.epochs.CDFepoch.encode_tt2000(378651727184000000) + + :param trange:18 digit CDF epoch times + :return:a list,typically 2, of datetime strings in iso format + """ + in_iso = [] + for t in trange: + c = cdflib.epochs.CDFepoch.encode_tt2000(int(t)) + d = datetime.strptime(c[0:26], "%Y-%m-%dT%H:%M:%S.%f") + in_iso.append(d) + return in_iso + +def str_to_iso(str_range): + iso_range = [] + for t in str_range: + try: + iso_range.append(datetime.strptime(t, '%Y-%m-%dT%H:%M:%S.%f')) + except ValueError: + try: + iso_range.append(datetime.strptime(t[0:26], '%Y-%m-%dT%H:%M:%S.%f')) + except ValueError: + iso_range.append(datetime.strptime(t[0:19], '%Y-%m-%dT%H:%M:%S')) + + return iso_range #iso_range.append(t.strftime("%Y-%m-%dT%H:%M:%S.%f")) + +def cdf_epoch_tojuldays(epoch_time): + """ + print,cdf_epoch_tojuldays(567098205397306000) = 2458108.6' + cdflib.epochs.CDFepoch.currentJDay() + if you give this guy a midnight value it gives you .5 + If you give this guy a noon value then it gives you .0 + So if our time is 03:00 then 12+3=15/24=.625 + + Doesn't seem to be used at the moment + + :param epoch_time: + :return: + """ + + iso_string = cdflib.epochs.CDFepoch.encode_tt2000(epoch_time) + iso_obj = dateutil.parser.parse(iso_string) + jDayP1 = cdflib.epochs.CDFepoch._JulianDay(iso_obj.year, iso_obj.month, iso_obj.day) + fraction = 12 + iso_obj.hour + jday = jDayP1 - 1 + fraction + return jday + +def 
cdf_epoch_tojuldays_24(epoch_time): + """ + print,cdf_epoch_tojuldays(567098205397306000) = 2458108.6' + cdflib.epochs.CDFepoch.currentJDay() + if you give this guy a midnight value it gives you .5 + If you give this guy a noon value then it gives you .0 + So if our time is 03:00 then 12+3=15/24=.625 + + Doesn't seem to be used at the moment + + :param epoch_time: + :return: + """ + if isinstance(epoch_time, str): + Epoch_FS0 = int(epoch_time) + iso_obj = epoch_to_iso([Epoch_FS0]) + elif isinstance(epoch_time, datetime): + iso_obj = epoch_time + elif isinstance(epoch_time,int): + iso_string = cdflib.epochs.CDFepoch.encode_tt2000(epoch_time) + iso_obj = dateutil.parser.parse(iso_string) + elif epoch_time.dtype == np.uint64: + iso_string = cdflib.epochs.CDFepoch.encode_tt2000(epoch_time) + iso_obj = dateutil.parser.parse(iso_string) + + jDayP1 = cdflib.epochs.CDFepoch._JulianDay(iso_obj.year, iso_obj.month, iso_obj.day) + fraction = 12 + iso_obj.hour + jday = jDayP1 -1 + fraction/24 + return jday + + + +''' +Nominally,Epoch_FS0 is one of the elements of the epoch +array extracted using Daniel's ccsds.py and as such is np.int64 ''' +def epoch_to_matching(Epoch_FS0): + """ + This produces the YYYYMMDDHHMMSSmillisec 20 char string used for match data tables''' + + :param Epoch_FS0: 18 digit epoch time + :return: iso formatted string + """ + if isinstance(Epoch_FS0, str): + Epoch_FS0 = int(Epoch_FS0) + isoTimeString = epoch_to_iso([Epoch_FS0]) + mD = dateutil.parser.parse(isoTimeString[0]) + elif isinstance(Epoch_FS0,datetime): + mD = Epoch_FS0 + elif Epoch_FS0.dtype == np.uint64: + isoTimeString = epoch_to_iso([Epoch_FS0]) + mD = dateutil.parser.parse(isoTimeString[0]) + + matchingString = "".join( + [ + str(mD.year), + "{:02d}".format(mD.month), + "{:02d}".format(mD.day), + "{:02d}".format(mD.hour), + "{:02d}".format(mD.minute), + "{:02d}".format(mD.second), + ] + ) + return matchingString + diff --git a/pyproject.toml b/pyproject.toml index 1aabd95..37caee1 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ dynamic = ["version"] authors = [{name = "Steven Christe", email="steven.d.christe@nasa.gov"}, {name = "Damian Barrous Dumme", email="damianbarrous@gmail.com"}] license = {file = "LICENSE.rst"} -requires-python = ">=3.7" +requires-python = ">=3.9" keywords = ["hermes", "nasa mission", "space weather"] classifiers = [ "Development Status :: 3 - Alpha", @@ -20,16 +20,12 @@ classifiers = [ "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Physics", ] dependencies = [ - 'astropy>=4.1.0', - 'numpy>=1.16.0', 'hermes_core @ git+https://github.com/HERMES-SOC/hermes_core/', 'ccsdspy @ git+https://github.com/ddasilva/ccsdspy.git' ]