
Commit

JP-3004: Should have NaN's for Suppressed Ramps, but Getting Zeros. (#141)

* Changing the way one-group ramps get suppressed. Changing the large variance threshold and standardizing it as a global variable (see the sketch after this list).

* Updating tests due to changes made in processing variances.

* Updated the change log.

* Updating comment for one good group ramp suppression.

* Updated the change log.
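A minimal sketch of the standardized threshold described in the first bullet, using the values that appear in the utils.py and ols_fit.py diffs below; the sample variances are illustrative only:

import numpy as np

# Values taken from the diffs below.
LARGE_VARIANCE = 1.e8
LARGE_VARIANCE_THRESHOLD = 0.01 * LARGE_VARIANCE

# Huge variances correspond to non-existing segments, so reset them to 0
# so they do not contribute to the combined variance.
var_p3 = np.array([4.3379547e-04, 2.5e7, LARGE_VARIANCE])
var_p3[var_p3 > LARGE_VARIANCE_THRESHOLD] = 0.
print(var_p3)  # only the first, physically plausible variance survives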

Co-authored-by: Howard Bushouse <[email protected]>
kmacdonald-stsci and hbushouse authored Jan 26, 2023
1 parent f8c3d23 commit 3a60f1f
Showing 5 changed files with 30 additions and 19 deletions.
7 changes: 6 additions & 1 deletion CHANGES.rst
@@ -9,7 +9,12 @@ General
Bug Fixes
---------

-

ramp_fitting
~~~~~~~~~~~~

- Fixed zeros that should be NaNs in the rate and rateints products and suppressed
  a cast warning due to attempts to cast NaN to an integer. [#141]

Changes to API
--------------
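The cast warning mentioned in the changelog entry above is suppressed in the utils.py diff further down. A minimal standalone sketch of the same idea, assuming a group-DQ row that uses NaN for unusable groups and a jump-detection flag value of 4 (both are assumptions of this sketch, not values taken from the diff):

import warnings
import numpy as np

JUMP_DET = 4  # assumed jump-detection flag value for this sketch
gdq_row = np.array([0., JUMP_DET, np.nan, 0.])  # NaN marks an unusable group

with warnings.catch_warnings():
    # Casting NaN to int32 raises an "invalid value" RuntimeWarning; ignore it
    # here, just as the new code in utils.calc_slope_vars does.
    warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning)
    wh_cr = np.where((gdq_row.astype(np.int32) & JUMP_DET) > 0)

print(wh_cr[0])  # only index 1 carries the jump flag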
6 changes: 3 additions & 3 deletions src/stcal/ramp_fitting/ols_fit.py
@@ -1105,7 +1105,7 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans)

# Huge variances correspond to non-existing segments, so are reset to 0
# to nullify their contribution.
var_p3[var_p3 > 0.1 * utils.LARGE_VARIANCE] = 0.
var_p3[var_p3 > utils.LARGE_VARIANCE_THRESHOLD] = 0.
var_p3[:, med_rates <= 0.] = 0.
warnings.resetwarnings()

@@ -1396,8 +1396,8 @@ def ramp_fit_overall(
# to nullify their contribution.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning)
var_p2[var_p2 > 0.1 * utils.LARGE_VARIANCE] = 0.
var_r2[var_r2 > 0.1 * utils.LARGE_VARIANCE] = 0.
var_p2[var_p2 > utils.LARGE_VARIANCE_THRESHOLD] = 0.
var_r2[var_r2 > utils.LARGE_VARIANCE_THRESHOLD] = 0.

# Some contributions to these vars may be NaN as they are from ramps
# having PIXELDQ=DO_NOT_USE
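Design note on the change above: the ad-hoc 0.1 * utils.LARGE_VARIANCE cutoff (and the 0.4 * LARGE_VARIANCE cutoffs in utils.py below) is replaced by the single LARGE_VARIANCE_THRESHOLD global, defined in utils.py as 0.01 * LARGE_VARIANCE, i.e. 1.e6, so every variance-reset site now uses the same threshold.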
10 changes: 6 additions & 4 deletions src/stcal/ramp_fitting/ramp_fit.py
@@ -264,14 +264,16 @@ def suppress_one_good_group_ramps(ramp_data):
ngood_groups = good_groups.sum(axis=0)
wh_one = np.where(ngood_groups == 1)

# Suppress the ramps with only one good group by flagg
# Suppress the ramps with only one good group by flagging
# all groups in the ramp as DO_NOT_USE.
wh1_rows = wh_one[0]
wh1_cols = wh_one[1]
for n in range(len(wh1_rows)):
row = wh1_rows[n]
col = wh1_cols[n]
# For ramps that have good 0th group, but the rest of the
# ramp saturated, mark the 0th groups as saturated, too.
# Find ramps that have good 0th group, but the rest of the
# ramp flagged.
good_index = np.where(ramp_data.groupdq[integ, :, row, col] == 0)
if ramp_data.groupdq[integ, good_index, row, col] == 0:
ramp_data.groupdq[integ, good_index, row, col] = dnu_flag
ramp_data.groupdq[integ, :, row, col] = np.bitwise_or(
ramp_data.groupdq[integ, :, row, col], dnu_flag)
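A minimal sketch of the suppression step above for a single pixel's ramp, with assumed flag values (DO_NOT_USE = 1, SATURATED = 2); the real loop does this for every pixel whose ramp has exactly one good group:

import numpy as np

DO_NOT_USE = 1   # assumed flag values for this sketch
SATURATED = 2

# Group DQ for one integration of one pixel: only group 0 is good.
groupdq = np.array([0, SATURATED, SATURATED, SATURATED], dtype=np.uint8)

if (groupdq == 0).sum() == 1:
    # Flag every group in the ramp as DO_NOT_USE, OR-ing so that existing
    # flags (here SATURATED) are preserved rather than overwritten.
    groupdq = np.bitwise_or(groupdq, DO_NOT_USE)

print(groupdq)  # [1 3 3 3]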
20 changes: 12 additions & 8 deletions src/stcal/ramp_fitting/utils.py
@@ -12,6 +12,7 @@

# Replace zero or negative variances with this:
LARGE_VARIANCE = 1.e8
LARGE_VARIANCE_THRESHOLD = 0.01 * LARGE_VARIANCE


class OptRes:
@@ -272,15 +273,15 @@ def output_optional(self, effintim):
opt_info : tuple
The tuple of computed optional results arrays for fitting.
"""
self.var_p_seg[self.var_p_seg > 0.4 * LARGE_VARIANCE] = 0.
self.var_r_seg[self.var_r_seg > 0.4 * LARGE_VARIANCE] = 0.
self.var_p_seg[self.var_p_seg > LARGE_VARIANCE_THRESHOLD] = 0.
self.var_r_seg[self.var_r_seg > LARGE_VARIANCE_THRESHOLD] = 0.

# Suppress, then re-enable, arithmetic warnings
warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning)
warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning)

# Tiny 'weights' values correspond to non-existent segments, so set to 0.
self.weights[1. / self.weights > 0.4 * LARGE_VARIANCE] = 0.
self.weights[1. / self.weights > LARGE_VARIANCE_THRESHOLD] = 0.
warnings.resetwarnings()

self.slope_seg /= effintim
@@ -533,8 +534,10 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg
del wh_good

# Locate any CRs that appear before the first SAT group...
wh_cr = np.where(
gdq_2d_nan[i_read, :].astype(np.int32) & ramp_data.flags_jump_det > 0)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning)
wh_cr = np.where(
gdq_2d_nan[i_read, :].astype(np.int32) & ramp_data.flags_jump_det > 0)

# ... but not on final read:
if len(wh_cr[0]) > 0 and (i_read < nreads - 1):
@@ -713,9 +716,9 @@ def output_integ(ramp_data, slope_int, dq_int, effintim, var_p3, var_r3, var_bot
warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning)
warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning)

var_p3[var_p3 > 0.4 * LARGE_VARIANCE] = 0.
var_r3[var_r3 > 0.4 * LARGE_VARIANCE] = 0.
var_both3[var_both3 > 0.4 * LARGE_VARIANCE] = 0.
var_p3[var_p3 > LARGE_VARIANCE_THRESHOLD] = 0.
var_r3[var_r3 > LARGE_VARIANCE_THRESHOLD] = 0.
var_both3[var_both3 > LARGE_VARIANCE_THRESHOLD] = 0.

data = slope_int / effintim
invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use
@@ -725,6 +728,7 @@
err = np.sqrt(var_both3)
dq = dq_int
var_poisson = var_p3

var_rnoise = var_r3
integ_info = (data, dq, var_poisson, var_rnoise, err)

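The context lines above (data = slope_int / effintim and invalid_data = flags_saturated | flags_do_not_use) feed the part of output_integ that this hunk does not show. A hypothetical sketch, not the stcal code itself, of the behavior the commit title asks for, NaN rather than 0 for suppressed pixels, with assumed flag values:

import numpy as np

DO_NOT_USE = 1   # assumed flag values for this sketch
SATURATED = 2
invalid_data = SATURATED | DO_NOT_USE

slope_int = np.array([0.5, 0.0, 1.2])                                # made-up slopes
dq_int = np.array([0, DO_NOT_USE | SATURATED, 0], dtype=np.uint32)   # made-up DQ
effintim = 10.737                                                    # made-up effective integration time

data = slope_int / effintim
# Pixels flagged DO_NOT_USE or SATURATED get NaN in the rateints data, not 0.
data[np.bitwise_and(dq_int, invalid_data).astype(bool)] = np.nan
print(data)  # middle pixel is nan, the others are finite rates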
6 changes: 3 additions & 3 deletions tests/test_ramp_fitting.py
@@ -924,7 +924,7 @@ def test_dq_multi_int_dnu():
[[0.00173518]]])
np.testing.assert_allclose(cvp, check, tol, tol)

check = np.array([[[2.5000000e+07]],
check = np.array([[[0.]],
[[4.3379547e-04]]])
np.testing.assert_allclose(cvr, check, tol, tol)

@@ -1038,8 +1038,8 @@ def test_new_saturation():
[[0.00086654, 0. , 0.]]])
np.testing.assert_allclose(cvp, check, tol, tol)

check = np.array([[[6.5232398e-06, 6.1970772e-05, 3.3333334e+07]],
[[6.1970772e-05, 3.3333334e+07, 3.3333334e+07]]])
check = np.array([[[6.5232398e-06, 6.1970772e-05, 0.]],
[[6.1970772e-05, 0., 0.]]])
np.testing.assert_allclose(cvr, check, tol, tol)

check = np.array([[[0.02353317, 0.02258242, 0.]],
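The updated expectations follow directly from the lower threshold: values such as 2.5000000e+07 and 3.3333334e+07 now exceed LARGE_VARIANCE_THRESHOLD and come back as 0. A hedged check using the numbers from the diff above:

import numpy as np

LARGE_VARIANCE_THRESHOLD = 1.e6  # 0.01 * LARGE_VARIANCE, from utils.py above

old_check = np.array([6.5232398e-06, 6.1970772e-05, 3.3333334e+07])
new_check = old_check.copy()
new_check[new_check > LARGE_VARIANCE_THRESHOLD] = 0.
np.testing.assert_allclose(new_check, [6.5232398e-06, 6.1970772e-05, 0.])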
