From a85d35c071191fcc0ba5f881d2c33e592e4864b3 Mon Sep 17 00:00:00 2001
From: Sara Ogaz
Date: Fri, 18 Jan 2019 15:46:27 -0500
Subject: [PATCH 1/2] Refactor Combiner into a base class and combine-type
 subclasses. #578. First pass.

---
 ccdproc/combiner.py            | 261 ++++++++++++++++++++++++---------
 ccdproc/tests/test_combiner.py | 221 +++++++++++++++-------------
 2 files changed, 307 insertions(+), 175 deletions(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index 38dcce6d..c9e8a2a1 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -2,6 +2,8 @@
 """This module implements the combiner class."""
 
+from abc import ABC, abstractmethod
+
 import numpy as np
 from numpy import ma
 from .core import sigma_func
@@ -9,12 +11,12 @@
 from astropy.nddata import CCDData, StdDevUncertainty
 from astropy import log
 
-__all__ = ['Combiner', 'combine']
+__all__ = ['MedianCombiner', 'SumCombiner', 'AverageCombiner', 'combine']
 
 
-class Combiner(object):
+class CombinerBase(ABC):
     """
-    A class for combining CCDData objects.
+    A base class for combining CCDData objects.
 
     The Combiner class is used to combine together `~astropy.nddata.CCDData` objects
     including the method for combining the data, rejecting outlying data,
@@ -30,6 +33,14 @@ class Combiner(object):
         description. If ``None`` it uses ``np.float64``.
         Default is ``None``.
 
+    use_input_uncertainties : boolean, optional
+        If ``False`` (the default), uncertainties are calculated from the
+        combined data; if ``True``, they are generated from the input
+        CCDData uncertainties instead.
+
+    uncertainty_func : function, optional
+        Function to calculate the uncertainty. Each subclass sets a default.
+
     Raises
     ------
     TypeError
@@ -44,19 +55,21 @@ class Combiner(object):
    >>> import numpy as np
    >>> import astropy.units as u
    >>> from astropy.nddata import CCDData
-    >>> from ccdproc import Combiner
+    >>> from ccdproc import AverageCombiner
    >>> ccddata1 = CCDData(np.ones((4, 4)), unit=u.adu)
    >>> ccddata2 = CCDData(np.zeros((4, 4)), unit=u.adu)
    >>> ccddata3 = CCDData(np.ones((4, 4)), unit=u.adu)
-    >>> c = Combiner([ccddata1, ccddata2, ccddata3])
-    >>> ccdall = c.average_combine()
-    >>> ccdall # doctest: +FLOAT_CMP
+    >>> c = AverageCombiner([ccddata1, ccddata2, ccddata3])
+    >>> ccdall = c.combiner_method()
+    >>> ccdall  # doctest: +FLOAT_CMP
    CCDData([[ 0.66666667,  0.66666667,  0.66666667,  0.66666667],
             [ 0.66666667,  0.66666667,  0.66666667,  0.66666667],
             [ 0.66666667,  0.66666667,  0.66666667,  0.66666667],
             [ 0.66666667,  0.66666667,  0.66666667,  0.66666667]])
    """
-    def __init__(self, ccd_list, dtype=None):
+    # TODO: add an uncertainty_func setter?
+    def __init__(self, ccd_list, dtype=None, use_input_uncertainties=False,
+                 uncertainty_func=None):
         if ccd_list is None:
             raise TypeError("ccd_list should be a list of CCDData objects.")
 
@@ -89,6 +102,8 @@ def __init__(self, ccd_list, dtype=None):
         self.unit = default_unit
         self.weights = None
         self._dtype = dtype
+        self.use_input_uncertainties = use_input_uncertainties
+        self.uncertainty_func = uncertainty_func
 
         # set up the data array
         new_shape = (len(ccd_list),) + default_shape
@@ -309,9 +324,37 @@ def _get_scaled_data(self, scale_arg):
             return self.data_arr * self.scaling
         return self.data_arr
 
+    @abstractmethod
+    def combiner_method(self, combine_func=None, scale_to=None):
+        """
+        Abstract combine method implemented by each subclass. It might be
+        worthwhile to re-tool this so we don't have to repeat code, but my
+        initial solution to that might make the implementation a little
+        harder to understand for someone reading the code, as the top-level
+        combiner function in this abstract base class would no longer be
+        the abstract method.
+        """
+
+        pass
+
+    @abstractmethod
+    def uncertainty_fresh(self, masked_values=None):
+        pass
+
+    @abstractmethod
+    def uncertainty_provided(self):
+        pass
+
+
+class MedianCombiner(CombinerBase):
+
+    def __init__(self, ccd_list, dtype=None, use_input_uncertainties=False,
+                 uncertainty_func=sigma_func):
+        super().__init__(ccd_list, dtype, use_input_uncertainties,
+                         uncertainty_func)
+
     # set up the combining algorithms
-    def median_combine(self, median_func=ma.median, scale_to=None,
-                       uncertainty_func=sigma_func):
+    def combiner_method(self, combine_func=ma.median, scale_to=None):
         """
         Median combine a set of arrays.
 
@@ -324,18 +367,13 @@ def median_combine(self, median_func=ma.median, scale_to=None,
 
         Parameters
         ----------
-        median_func : function, optional
+        combine_func : function, optional
             Function that calculates median of a `numpy.ma.MaskedArray`.
             Default is `numpy.ma.median`.
 
         scale_to : float or None, optional
-            Scaling factor used in the average combined image. If given,
-            it overrides `scaling`.
-            Defaults to None.
-
-        uncertainty_func : function, optional
-            Function to calculate uncertainty.
-            Defaults is `~ccdproc.sigma_func`.
+            Scaling factor used in the median combined image. If given,
+            it overrides `scaling`. Defaults to ``None``.
 
         Returns
         -------
@@ -347,15 +385,38 @@ def median_combine(self, median_func=ma.median, scale_to=None,
             The uncertainty currently calculated using the median absolute
             deviation does not account for rejected pixels.
         """
+
         # set the data
-        data = median_func(self._get_scaled_data(scale_to), axis=0)
+        data = combine_func(self._get_scaled_data(scale_to), axis=0)
 
         # set the mask
         masked_values = self.data_arr.mask.sum(axis=0)
         mask = (masked_values == len(self.data_arr))
 
+        # handle uncertainty; this may change
+        if self.use_input_uncertainties:
+            uncertainty = self.uncertainty_provided()
+        else:
+            uncertainty = self.uncertainty_fresh(masked_values)
+
+        # create the combined image with a dtype matching the combiner
+        combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
+                                 mask=mask, unit=self.unit,
+                                 uncertainty=StdDevUncertainty(uncertainty))
+
+        # update the meta data
+        combined_image.meta['NCOMBINE'] = len(self.data_arr)
+
+        # return the combined image
+        return combined_image
+
+    def uncertainty_fresh(self, masked_values=None):
+        """
+        Calculate the uncertainty of the median combined image from the
+        input data, corrected for the number of contributing images.
+        """
         # set the uncertainty
-        uncertainty = uncertainty_func(self.data_arr, axis=0)
+        uncertainty = self.uncertainty_func(self.data_arr, axis=0)
         # Divide uncertainty by the number of pixel (#309)
         uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
         # Convert uncertainty to plain numpy array (#351)
@@ -365,19 +426,27 @@ def median_combine(self, median_func=ma.median, scale_to=None,
         # masks.
         uncertainty = np.asarray(uncertainty)
 
-        # create the combined image with a dtype matching the combiner
-        combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
-                                 mask=mask, unit=self.unit,
-                                 uncertainty=StdDevUncertainty(uncertainty))
+        return uncertainty
 
-        # update the meta data
-        combined_image.meta['NCOMBINE'] = len(self.data_arr)
+    def uncertainty_provided(self):
+        """
+        Build the uncertainty of the combined image from the input
+        CCDData uncertainties rather than from the combined data.
+        Not yet implemented; currently returns `None`.
+        """
+        # TODO: implement combining of the input uncertainties
+        return None
 
-        # return the combined image
-        return combined_image
 
-    def average_combine(self, scale_func=ma.average, scale_to=None,
-                        uncertainty_func=ma.std):
+
+class AverageCombiner(CombinerBase):
+    def __init__(self, ccd_list, dtype=None, use_input_uncertainties=False,
+                 uncertainty_func=ma.std):
+        super().__init__(ccd_list, dtype, use_input_uncertainties,
+                         uncertainty_func)
+
+    # set up the combining algorithms
+    def combiner_method(self, combine_func=ma.average, scale_to=None):
+
         """
         Average combine together a set of arrays.
 
@@ -391,37 +460,34 @@ def average_combine(self, scale_func=ma.average, scale_to=None,
 
         Parameters
         ----------
-        scale_func : function, optional
+        combine_func : function, optional
             Function to calculate the average.
             Defaults to `numpy.ma.average`.
 
-        scale_to : float or None, optional
-            Scaling factor used in the average combined image. If given,
+        scale_to : float or None, optional
+            Scaling factor used in the average combined image. If given,
             it overrides `scaling`. Defaults to ``None``.
 
-        uncertainty_func : function, optional
-            Function to calculate uncertainty. Defaults to `numpy.ma.std`.
-
         Returns
         -------
         combined_image: `~astropy.nddata.CCDData`
             CCDData object based on the combined input of CCDData objects.
         """
+
         # set up the data
-        data, wei = scale_func(self._get_scaled_data(scale_to),
-                               axis=0, weights=self.weights,
-                               returned=True)
+        data, wei = combine_func(self._get_scaled_data(scale_to),
+                                 axis=0, weights=self.weights,
+                                 returned=True)
 
         # set up the mask
         masked_values = self.data_arr.mask.sum(axis=0)
         mask = (masked_values == len(self.data_arr))
 
-        # set up the deviation
-        uncertainty = uncertainty_func(self.data_arr, axis=0)
-        # Divide uncertainty by the number of pixel (#309)
-        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
-        # Convert uncertainty to plain numpy array (#351)
-        uncertainty = np.asarray(uncertainty)
+        # handle uncertainty; this may change
+        if self.use_input_uncertainties:
+            uncertainty = self.uncertainty_provided()
+        else:
+            uncertainty = self.uncertainty_fresh(masked_values)
 
         # create the combined image with a dtype that matches the combiner
         combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
@@ -434,8 +500,36 @@ def average_combine(self, scale_func=ma.average, scale_to=None,
         # return the combined image
         return combined_image
 
-    def sum_combine(self, sum_func=ma.sum, scale_to=None,
-                    uncertainty_func=ma.std):
+    def uncertainty_fresh(self, masked_values=None):
+        """
+        Calculate the uncertainty of the average combined image from the
+        standard deviation of the input data.
+        """
+        # set up the deviation
+        uncertainty = self.uncertainty_func(self.data_arr, axis=0)
+        # Divide uncertainty by the number of pixels (#309)
+        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
+        # Convert uncertainty to plain numpy array (#351)
+        uncertainty = np.asarray(uncertainty)
+
+        return uncertainty
+
+    def uncertainty_provided(self):
+        """
+        Build the uncertainty from the input uncertainties (not implemented).
+        """
+        # TODO: implement combining of the input uncertainties
+        return None
+
+
+class SumCombiner(CombinerBase):
+    def __init__(self, ccd_list, dtype=None, use_input_uncertainties=False,
+                 uncertainty_func=ma.std):
+        super().__init__(ccd_list, dtype, use_input_uncertainties,
+                         uncertainty_func)
+
+    # set up the combining algorithms
+    def combiner_method(self, combine_func=ma.sum, scale_to=None):
         """
         Sum combine together a set of arrays.
 
@@ -452,7 +546,7 @@ def sum_combine(self, sum_func=ma.sum, scale_to=None,
 
         Parameters
         ----------
-        sum_func : function, optional
+        combine_func : function, optional
             Function to calculate the sum.
             Defaults to `numpy.ma.sum`.
 
         scale_to : float or None, optional
             Scaling factor used in the sum combined image. If given,
             it overrides `scaling`. Defaults to ``None``.
 
-        uncertainty_func : function, optional
-            Function to calculate uncertainty. Defaults to `numpy.ma.std`.
-
         Returns
         -------
         combined_image: `~astropy.nddata.CCDData`
             CCDData object based on the combined input of CCDData objects.
         """
+
         # set up the data
-        data = sum_func(self._get_scaled_data(scale_to), axis=0)
+        data = combine_func(self._get_scaled_data(scale_to), axis=0)
+
         # set up the mask
         masked_values = self.data_arr.mask.sum(axis=0)
         mask = (masked_values == len(self.data_arr))
 
-        # set up the deviation
-        uncertainty = uncertainty_func(self.data_arr, axis=0)
-        # Divide uncertainty by the number of pixel (#309)
-        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
-        # Convert uncertainty to plain numpy array (#351)
-        uncertainty = np.asarray(uncertainty)
-        # Multiply uncertainty by square root of the number of images
-        uncertainty *= len(self.data_arr) - masked_values
+        # handle uncertainty; this may change
+        if self.use_input_uncertainties:
+            uncertainty = self.uncertainty_provided()
+        else:
+            uncertainty = self.uncertainty_fresh(masked_values)
 
         # create the combined image with a dtype that matches the combiner
         combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
@@ -495,6 +585,30 @@ def sum_combine(self, sum_func=ma.sum, scale_to=None,
         # return the combined image
         return combined_image
 
+    def uncertainty_fresh(self, masked_values=None):
+        """
+        Calculate the uncertainty of the sum combined image from the
+        standard deviation of the input data.
+        """
+        # set up the deviation
+        uncertainty = self.uncertainty_func(self.data_arr, axis=0)
+        # Divide uncertainty by the number of pixels (#309)
+        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
+        # Convert uncertainty to plain numpy array (#351)
+        uncertainty = np.asarray(uncertainty)
+        # Multiply by N images: net scaling of the deviation is sqrt(N)
+        uncertainty *= len(self.data_arr) - masked_values
+
+        return uncertainty
+
+    def uncertainty_provided(self):
+        """
+        Build the uncertainty from the input CCDData uncertainties.
+        Not yet implemented; currently returns `None`.
+        """
+        # TODO: implement combining of the input uncertainties
+        return None
+
 
 def _calculate_step_sizes(x_size, y_size, num_chunks):
     """
@@ -530,7 +644,8 @@ def combine(img_list, output_file=None,
             sigma_clip_low_thresh=3, sigma_clip_high_thresh=3,
             sigma_clip_func=ma.mean, sigma_clip_dev_func=ma.std,
-            dtype=None, combine_uncertainty_function=None, **ccdkwargs):
+            dtype=None, use_input_uncertainties=False,
+            combine_uncertainty_function=None, **ccdkwargs):
     """
     Convenience function for combining multiple images.
@@ -647,11 +762,11 @@ def combine(img_list, output_file=None,
 
     # Select Combine function to call in Combiner
     if method == 'average':
-        combine_function = 'average_combine'
+        combiner_class = AverageCombiner
     elif method == 'median':
-        combine_function = 'median_combine'
+        combiner_class = MedianCombiner
     elif method == 'sum':
-        combine_function = 'sum_combine'
+        combiner_class = SumCombiner
     else:
         raise ValueError("unrecognised combine method : {0}.".format(method))
 
@@ -769,8 +884,15 @@ def combine(img_list, output_file=None,
             # https://github.com/astropy/ccdproc/pull/630
             ccd_list.append(imgccd[x:xend, y:yend].copy())
 
+        # if an uncertainty function was provided, pass it to the combiner
+        combiner_kwds = {}
+        if combine_uncertainty_function is not None:
+            combiner_kwds['uncertainty_func'] = combine_uncertainty_function
+
         # Create Combiner for tile
-        tile_combiner = Combiner(ccd_list, dtype=dtype)
+        tile_combiner = combiner_class(
+            ccd_list, dtype=dtype,
+            use_input_uncertainties=use_input_uncertainties, **combiner_kwds)
 
         # Set all properties and call all methods
         for to_set in to_set_in_combiner:
@@ -778,12 +900,8 @@ def combine(img_list, output_file=None,
         for to_call in to_call_in_combiner:
             getattr(tile_combiner, to_call)(**to_call_in_combiner[to_call])
 
-        # Finally call the combine algorithm
-        combine_kwds = {}
-        if combine_uncertainty_function is not None:
-            combine_kwds['uncertainty_func'] = combine_uncertainty_function
-
-        comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds)
+        # Finally call the combine method
+        comb_tile = tile_combiner.combiner_method()
 
         # add it back into the master image
         ccd.data[x:xend, y:yend] = comb_tile.data
diff --git a/ccdproc/tests/test_combiner.py b/ccdproc/tests/test_combiner.py
index 767a28f8..cc19885d 100644
--- a/ccdproc/tests/test_combiner.py
+++ b/ccdproc/tests/test_combiner.py
@@ -9,19 +9,21 @@
 from astropy.utils.data import get_pkg_data_filename
 from astropy.nddata import CCDData
 
-from ..combiner import Combiner, combine, _calculate_step_sizes
+from ..combiner import (CombinerBase, MedianCombiner, SumCombiner,
+                        AverageCombiner, combine, _calculate_step_sizes)
 
+# TODO: add a test that instantiating CombinerBase directly raises an error
 # test that the Combiner raises error if empty
 def test_combiner_empty():
     with pytest.raises(TypeError):
-        Combiner()  # empty initializer should fail
+        MedianCombiner()  # empty initializer should fail
 
 
 # test that the Combiner raises error if empty if ccd_list is None
 def test_combiner_init_with_none():
     with pytest.raises(TypeError):
-        Combiner(None)  # empty initializer should fail
+        SumCombiner(None)  # empty initializer should fail
 
 
 # test that Combiner throws an error if input
@@ -29,7 +31,7 @@ def test_combiner_init_with_none():
 def test_ccddata_combiner_objects(ccd_data):
     ccd_list = [ccd_data, ccd_data, None]
     with pytest.raises(TypeError):
-        Combiner(ccd_list)  # different objects should fail
+        AverageCombiner(ccd_list)  # different objects should fail
 
 
 # test that Combiner throws an error if input
@@ -38,7 +40,7 @@ def test_ccddata_combiner_size(ccd_data):
     ccd_large = CCDData(np.zeros((200, 100)), unit=u.adu)
     ccd_list = [ccd_data, ccd_data, ccd_large]
     with pytest.raises(TypeError):
-        Combiner(ccd_list)  # arrays of different sizes should fail
+        MedianCombiner(ccd_list)  # arrays of different sizes should fail
 
 
 # test that Combiner throws an error if input
@@ -47,13 +49,13 @@ def test_ccddata_combiner_units(ccd_data):
     ccd_large = CCDData(np.zeros((100, 100)), unit=u.second)
     ccd_list = [ccd_data, ccd_data, ccd_large]
     with pytest.raises(TypeError):
-        Combiner(ccd_list)
+        SumCombiner(ccd_list)
 
 
 # test if mask and data array are created
 def test_combiner_create(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
+    c = AverageCombiner(ccd_list)
     assert c.data_arr.shape == (3, 100, 100)
     assert c.data_arr.mask.shape == (3, 100, 100)
 
@@ -61,17 +63,19 @@ def test_combiner_create(ccd_data):
 # test if dtype matches the value that is passed
 def test_combiner_dtype(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list, dtype=np.float32)
-    assert c.data_arr.dtype == np.float32
-    avg = c.average_combine()
+    c1 = AverageCombiner(ccd_list, dtype=np.float32)
+    assert c1.data_arr.dtype == np.float32
+    avg = c1.combiner_method()
     # dtype of average should match input dtype
-    assert avg.dtype == c.dtype
-    med = c.median_combine()
+    assert avg.dtype == c1.dtype
+    c2 = MedianCombiner(ccd_list, dtype=np.float32)
+    med = c2.combiner_method()
     # dtype of median should match dtype of input
-    assert med.dtype == c.dtype
-    result_sum = c.sum_combine()
+    assert med.dtype == c2.dtype
+    c3 = SumCombiner(ccd_list, dtype=np.float32)
+    result_sum = c3.combiner_method()
     # dtype of sum should match dtype of input
-    assert result_sum.dtype == c.dtype
+    assert result_sum.dtype == c3.dtype
 
 
 # test mask is created from ccd.data
@@ -81,7 +85,7 @@ def test_combiner_mask():
     mask = (data == 0)
     ccd = CCDData(data, unit=u.adu, mask=mask)
     ccd_list = [ccd, ccd, ccd]
-    c = Combiner(ccd_list)
+    c = AverageCombiner(ccd_list)
     assert c.data_arr.shape == (3, 10, 10)
     assert c.data_arr.mask.shape == (3, 10, 10)
     assert not c.data_arr.mask[0, 5, 5]
@@ -89,14 +93,14 @@ def test_combiner_mask():
 
 def test_weights(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
+    c = MedianCombiner(ccd_list)
     with pytest.raises(TypeError):
         c.weights = 1
 
 
 def test_weights_shape(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
+    c = SumCombiner(ccd_list)
     with pytest.raises(ValueError):
         c.weights = ccd_data.data
 
@@ -107,9 +111,9 @@ def test_combiner_minmax():
                 CCDData(np.zeros((10, 10)) - 1000, unit=u.adu),
                 CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)]
 
-    c = Combiner(ccd_list)
+    c = MedianCombiner(ccd_list)
     c.minmax_clipping(min_clip=-500, max_clip=500)
-    ccd = c.median_combine()
+    ccd = c.combiner_method()
     assert ccd.data.mean() == 0
 
 
@@ -118,7 +122,7 @@ def test_combiner_minmax_max():
                 CCDData(np.zeros((10, 10)) - 1000, unit=u.adu),
                 CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)]
 
-    c = Combiner(ccd_list)
+    c = SumCombiner(ccd_list)
     c.minmax_clipping(min_clip=None, max_clip=500)
     assert c.data_arr[2].mask.all()
 
@@ -128,7 +132,7 @@ def test_combiner_minmax_min():
                 CCDData(np.zeros((10, 10)) - 1000, unit=u.adu),
                 CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)]
 
-    c = Combiner(ccd_list)
+    c = AverageCombiner(ccd_list)
     c.minmax_clipping(min_clip=-500, max_clip=None)
     assert c.data_arr[1].mask.all()
 
@@ -141,8 +145,8 @@ def test_combiner_sigmaclip_high():
                 CCDData(np.zeros((10, 10)) + 10, unit=u.adu),
                 CCDData(np.zeros((10, 10)) + 1000, unit=u.adu)]
 
-    c = Combiner(ccd_list)
-    # using mad for more robust statistics vs. std
+    c = MedianCombiner(ccd_list)
+    # using mad for more robust statistics vs. std
     c.sigma_clipping(high_thresh=3, low_thresh=None,
                      func=np.ma.median, dev_func=mad)
     assert c.data_arr[5].mask.all()
 
@@ -155,9 +159,10 @@ def test_combiner_sigmaclip_single_pix():
                 CCDData(np.zeros((10, 10)) - 10, unit=u.adu),
                 CCDData(np.zeros((10, 10)) + 10, unit=u.adu),
                 CCDData(np.zeros((10, 10)) - 10, unit=u.adu)]
-    c = Combiner(ccd_list)
-    # add a single pixel in another array to check that
-    # that one gets rejected
+
+    c = SumCombiner(ccd_list)
+    # add a single pixel in another array to check that
+    # that one gets rejected
     c.data_arr[0, 5, 5] = 0
     c.data_arr[1, 5, 5] = -5
     c.data_arr[2, 5, 5] = 5
@@ -176,8 +181,8 @@ def test_combiner_sigmaclip_low():
                 CCDData(np.zeros((10, 10)) + 10, unit=u.adu),
                 CCDData(np.zeros((10, 10)) - 1000, unit=u.adu)]
 
-    c = Combiner(ccd_list)
-    # using mad for more robust statistics vs. std
+    c = AverageCombiner(ccd_list)
+    # using mad for more robust statistics vs. std
     c.sigma_clipping(high_thresh=None, low_thresh=3,
                      func=np.ma.median, dev_func=mad)
     assert c.data_arr[5].mask.all()
 
@@ -186,8 +191,8 @@
 # test that the median combination works and returns a ccddata object
 def test_combiner_median(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
-    ccd = c.median_combine()
+    c = MedianCombiner(ccd_list)
+    ccd = c.combiner_method()
     assert isinstance(ccd, CCDData)
     assert ccd.shape == (100, 100)
     assert ccd.unit == u.adu
@@ -197,8 +202,8 @@ def test_combiner_median(ccd_data):
 # test that the average combination works and returns a ccddata object
 def test_combiner_average(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
-    ccd = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd = c.combiner_method()
     assert isinstance(ccd, CCDData)
     assert ccd.shape == (100, 100)
     assert ccd.unit == u.adu
@@ -208,8 +213,8 @@ def test_combiner_average(ccd_data):
 # test that the sum combination works and returns a ccddata object
 def test_combiner_sum(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
-    ccd = c.sum_combine()
+    c = SumCombiner(ccd_list)
+    ccd = c.combiner_method()
     assert isinstance(ccd, CCDData)
     assert ccd.shape == (100, 100)
     assert ccd.unit == u.adu
@@ -223,8 +228,8 @@ def test_combiner_mask_average():
     mask = (data == 0)
     ccd = CCDData(data, unit=u.adu, mask=mask)
     ccd_list = [ccd, ccd, ccd]
-    c = Combiner(ccd_list)
-    ccd = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd = c.combiner_method()
     assert ccd.data[0, 0] == 0
     assert ccd.data[5, 5] == 1
     assert ccd.mask[0, 0]
@@ -236,31 +241,34 @@ def test_combiner_with_scaling(ccd_data):
     # whose average is 1.
     ccd_data_lower = ccd_data.multiply(3)
     ccd_data_higher = ccd_data.multiply(0.9)
-    combiner = Combiner([ccd_data, ccd_data_higher, ccd_data_lower])
+    combiner1 = AverageCombiner([ccd_data, ccd_data_higher, ccd_data_lower])
     # scale each array to the mean of the first image
     scale_by_mean = lambda x: ccd_data.data.mean()/np.ma.average(x)
-    combiner.scaling = scale_by_mean
-    avg_ccd = combiner.average_combine()
+    combiner1.scaling = scale_by_mean
+    avg_ccd = combiner1.combiner_method()
     # Does the mean of the scaled arrays match the value to which it was
     # scaled?
     np.testing.assert_almost_equal(avg_ccd.data.mean(),
                                    ccd_data.data.mean())
     assert avg_ccd.shape == ccd_data.shape
 
-    median_ccd = combiner.median_combine()
+    combiner2 = MedianCombiner([ccd_data, ccd_data_higher, ccd_data_lower])
+    # scale the median combiner to the mean of the first image as well
+    combiner2.scaling = scale_by_mean
+    median_ccd = combiner2.combiner_method()
     # Does median also scale to the correct value?
     np.testing.assert_almost_equal(np.median(median_ccd.data),
                                    np.median(ccd_data.data))
 
     # Set the scaling manually...
-    combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)]
-    avg_ccd = combiner.average_combine()
+    combiner1.scaling = [scale_by_mean(combiner1.data_arr[i]) for i in range(3)]
+    avg_ccd = combiner1.combiner_method()
     np.testing.assert_almost_equal(avg_ccd.data.mean(),
                                    ccd_data.data.mean())
     assert avg_ccd.shape == ccd_data.shape
 
 
 def test_combiner_scaling_fails(ccd_data):
-    combiner = Combiner([ccd_data, ccd_data.copy()])
+    combiner = SumCombiner([ccd_data, ccd_data.copy()])
     # Should fail unless scaling is set to a function or list-like
     with pytest.raises(TypeError):
         combiner.scaling = 5
@@ -273,8 +281,8 @@ def test_combiner_mask_median():
     mask = (data == 0)
     ccd = CCDData(data, unit=u.adu, mask=mask)
     ccd_list = [ccd, ccd, ccd]
-    c = Combiner(ccd_list)
-    ccd = c.median_combine()
+    c = MedianCombiner(ccd_list)
+    ccd = c.combiner_method()
     assert ccd.data[0, 0] == 0
     assert ccd.data[5, 5] == 1
     assert ccd.mask[0, 0]
@@ -288,8 +296,8 @@ def test_combiner_mask_sum():
     mask = (data == 0)
     ccd = CCDData(data, unit=u.adu, mask=mask)
     ccd_list = [ccd, ccd, ccd]
-    c = Combiner(ccd_list)
-    ccd = c.sum_combine()
+    c = SumCombiner(ccd_list)
+    ccd = c.combiner_method()
     assert ccd.data[0, 0] == 0
     assert ccd.data[5, 5] == 3
     assert ccd.mask[0, 0]
@@ -301,8 +309,8 @@ def test_combine_average_fitsimages():
     fitsfile = get_pkg_data_filename('data/a8280271.fits')
     ccd = CCDData.read(fitsfile, unit=u.adu)
     ccd_list = [ccd] * 3
-    c = Combiner(ccd_list)
-    ccd_by_combiner = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd_by_combiner = c.combiner_method()
 
     fitsfilename_list = [fitsfile] * 3
     avgccd = combine(fitsfilename_list, output_file=None,
@@ -319,9 +327,10 @@ def test_combine_numpyndarray():
     """
     fitsfile = get_pkg_data_filename('data/a8280271.fits')
     ccd = CCDData.read(fitsfile, unit=u.adu)
+
     ccd_list = [ccd] * 3
-    c = Combiner(ccd_list)
-    ccd_by_combiner = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd_by_combiner = c.combiner_method()
 
     fitsfilename_list = np.array([fitsfile] * 3)
     avgccd = combine(fitsfilename_list, output_file=None,
@@ -354,8 +363,8 @@ def test_combine_average_ccddata():
     fitsfile = get_pkg_data_filename('data/a8280271.fits')
     ccd = CCDData.read(fitsfile, unit=u.adu)
     ccd_list = [ccd] * 3
-    c = Combiner(ccd_list)
-    ccd_by_combiner = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd_by_combiner = c.combiner_method()
 
     avgccd = combine(ccd_list, output_file=None, method='average', unit=u.adu)
     # averaging same ccdData should give back same images
@@ -368,8 +377,8 @@ def test_combine_limitedmem_fitsimages():
     fitsfile = get_pkg_data_filename('data/a8280271.fits')
     ccd = CCDData.read(fitsfile, unit=u.adu)
     ccd_list = [ccd] * 5
-    c = Combiner(ccd_list)
-    ccd_by_combiner = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd_by_combiner = c.combiner_method()
 
     fitsfilename_list = [fitsfile] * 5
     avgccd = combine(fitsfilename_list, output_file=None, method='average',
@@ -384,11 +393,11 @@ def test_combine_limitedmem_scale_fitsimages():
     fitsfile = get_pkg_data_filename('data/a8280271.fits')
     ccd = CCDData.read(fitsfile, unit=u.adu)
     ccd_list = [ccd] * 5
-    c = Combiner(ccd_list)
+    c = AverageCombiner(ccd_list)
     # scale each array to the mean of the first image
     scale_by_mean = lambda x: ccd.data.mean()/np.ma.average(x)
     c.scaling = scale_by_mean
-    ccd_by_combiner = c.average_combine()
+    ccd_by_combiner = c.combiner_method()
 
     fitsfilename_list = [fitsfile] * 5
     avgccd = combine(fitsfilename_list, output_file=None, method='average',
@@ -401,8 +410,8 @@ def test_combine_limitedmem_scale_fitsimages():
 # test the optional uncertainty function in average_combine
 def test_average_combine_uncertainty(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
-    ccd = c.average_combine(uncertainty_func=np.sum)
+    c = AverageCombiner(ccd_list, uncertainty_func=np.sum)
+    ccd = c.combiner_method()
     uncert_ref = np.sum(c.data_arr, 0) / np.sqrt(3)
     np.testing.assert_array_equal(ccd.uncertainty.array, uncert_ref)
 
@@ -417,8 +426,8 @@ def test_average_combine_uncertainty(ccd_data):
 # test the optional uncertainty function in median_combine
 def test_median_combine_uncertainty(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
-    ccd = c.median_combine(uncertainty_func=np.sum)
+    c = MedianCombiner(ccd_list, uncertainty_func=np.sum)
+    ccd = c.combiner_method()
     uncert_ref = np.sum(c.data_arr, 0) / np.sqrt(3)
     np.testing.assert_array_equal(ccd.uncertainty.array, uncert_ref)
 
@@ -433,8 +442,8 @@ def test_median_combine_uncertainty(ccd_data):
 # test the optional uncertainty function in sum_combine
 def test_sum_combine_uncertainty(ccd_data):
     ccd_list = [ccd_data, ccd_data, ccd_data]
-    c = Combiner(ccd_list)
-    ccd = c.sum_combine(uncertainty_func=np.sum)
+    c = SumCombiner(ccd_list, uncertainty_func=np.sum)
+    ccd = c.combiner_method()
     uncert_ref = np.sum(c.data_arr, 0) * np.sqrt(3)
     np.testing.assert_almost_equal(ccd.uncertainty.array, uncert_ref)
 
@@ -449,8 +458,8 @@ def test_sum_combine_uncertainty(ccd_data):
 def test_combiner_uncertainty_average():
     ccd_list = [CCDData(np.ones((10, 10)), unit=u.adu),
                 CCDData(np.ones((10, 10)) * 2, unit=u.adu)]
-    c = Combiner(ccd_list)
-    ccd = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd = c.combiner_method()
     # Just the standard deviation of ccd data.
     ref_uncertainty = np.ones((10, 10)) / 2
     # Correction because we combined two images.
@@ -467,8 +476,8 @@ def test_combiner_uncertainty_average_mask():
     ccd_list = [ccd_with_mask,
                 CCDData(np.ones((10, 10)) * 2, unit=u.adu),
                 CCDData(np.ones((10, 10)) * 3, unit=u.adu)]
-    c = Combiner(ccd_list)
-    ccd = c.average_combine()
+    c = AverageCombiner(ccd_list)
+    ccd = c.combiner_method()
     # Just the standard deviation of ccd data.
     ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3])
     # Correction because we combined two images.
@@ -487,8 +496,8 @@ def test_combiner_uncertainty_median_mask():
     ccd_list = [ccd_with_mask,
                 CCDData(np.ones((10, 10)) * 2, unit=u.adu),
                 CCDData(np.ones((10, 10)) * 3, unit=u.adu)]
-    c = Combiner(ccd_list)
-    ccd = c.median_combine()
+    c = MedianCombiner(ccd_list)
+    ccd = c.combiner_method()
     # Just the standard deviation of ccd data.
     ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3])
     # Correction because we combined two images.
@@ -507,8 +516,8 @@ def test_combiner_uncertainty_sum_mask():
     ccd_list = [ccd_with_mask,
                 CCDData(np.ones((10, 10)) * 2, unit=u.adu),
                 CCDData(np.ones((10, 10)) * 3, unit=u.adu)]
-    c = Combiner(ccd_list)
-    ccd = c.sum_combine()
+    c = SumCombiner(ccd_list)
+    ccd = c.combiner_method()
     # Just the standard deviation of ccd data.
     ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3])
     ref_uncertainty *= np.sqrt(3)
@@ -524,11 +533,11 @@ def test_combiner_3d():
 
     ccd_list = [data1, data2, data3]
 
-    c = Combiner(ccd_list)
+    c = AverageCombiner(ccd_list)
     assert c.data_arr.shape == (3, 5, 5, 5)
     assert c.data_arr.mask.shape == (3, 5, 5, 5)
 
-    ccd = c.average_combine()
+    ccd = c.combiner_method()
     assert ccd.shape == (5, 5, 5)
     np.testing.assert_array_almost_equal(ccd.data, data1, decimal=4)
 
@@ -536,27 +545,32 @@ def test_combiner_3d():
 def test_3d_combiner_with_scaling(ccd_data):
     # The factors below are not particularly important; just avoid anything
     # whose average is 1.
-    ccd_data = CCDData(np.ones((5, 5, 5)), unit=u.adu)
+    ccd_data = CCDData(np.ones((5, 5, 5)), unit=u.adu)
     ccd_data_lower = CCDData(3 * np.ones((5, 5, 5)), unit=u.adu)
     ccd_data_higher = CCDData(0.9 * np.ones((5, 5, 5)), unit=u.adu)
-    combiner = Combiner([ccd_data, ccd_data_higher, ccd_data_lower])
+    combiner1 = AverageCombiner([ccd_data, ccd_data_higher, ccd_data_lower])
     # scale each array to the mean of the first image
     scale_by_mean = lambda x: ccd_data.data.mean()/np.ma.average(x)
-    combiner.scaling = scale_by_mean
-    avg_ccd = combiner.average_combine()
+    combiner1.scaling = scale_by_mean
+    avg_ccd = combiner1.combiner_method()
     # Does the mean of the scaled arrays match the value to which it was
     # scaled?
     np.testing.assert_almost_equal(avg_ccd.data.mean(),
                                    ccd_data.data.mean())
     assert avg_ccd.shape == ccd_data.shape
-    median_ccd = combiner.median_combine()
+
+    combiner2 = MedianCombiner([ccd_data, ccd_data_higher, ccd_data_lower])
+    # scale each array to the mean of the first image
+    scale_by_mean = lambda x: ccd_data.data.mean()/np.ma.average(x)
+    combiner2.scaling = scale_by_mean
+    median_ccd = combiner2.combiner_method()
     # Does median also scale to the correct value?
     np.testing.assert_almost_equal(np.median(median_ccd.data),
                                    np.median(ccd_data.data))
 
     # Set the scaling manually...
-    combiner.scaling = [scale_by_mean(combiner.data_arr[i]) for i in range(3)]
-    avg_ccd = combiner.average_combine()
+    combiner1.scaling = [scale_by_mean(combiner1.data_arr[i]) for i in range(3)]
+    avg_ccd = combiner1.combiner_method()
     np.testing.assert_almost_equal(avg_ccd.data.mean(),
                                    ccd_data.data.mean())
     assert avg_ccd.shape == ccd_data.shape
@@ -569,21 +583,20 @@ def test_clip_extrema_3d():
                CCDData(np.ones((3, 3, 3)) * 40., unit="adu"),
                CCDData(np.ones((3, 3, 3)) * 25., unit="adu"),
                CCDData(np.ones((3, 3, 3)) * 35., unit="adu"),
-               ]
-    c = Combiner(ccdlist)
+               ]
+    c = AverageCombiner(ccdlist)
     c.clip_extrema(nlow=1, nhigh=1)
-    result = c.average_combine()
+    result = c.combiner_method()
     expected = CCDData(np.ones((3, 3, 3)) * 30, unit="adu")
     np.testing.assert_array_equal(result, expected)
 
 
-@pytest.mark.parametrize('comb_func',
-                         ['average_combine', 'median_combine', 'sum_combine'])
-def test_writeable_after_combine(ccd_data, tmpdir, comb_func):
+@pytest.mark.parametrize('comb_class',
+                         [AverageCombiner, MedianCombiner, SumCombiner])
+def test_writeable_after_combine(ccd_data, tmpdir, comb_class):
     tmp_file = tmpdir.join('tmp.fits')
-    from ..combiner import Combiner
-    combined = Combiner([ccd_data for _ in range(3)])
-    ccd2 = getattr(combined, comb_func)()
+    combined = comb_class([ccd_data for _ in range(3)])
+    ccd2 = combined.combiner_method()
     # This should not fail because the resulting uncertainty has a mask
     ccd2.write(tmp_file.strpath)
 
@@ -595,13 +608,13 @@ def test_clip_extrema():
                CCDData(np.ones((3, 5)) * 40., unit="adu"),
                CCDData(np.ones((3, 5)) * 25., unit="adu"),
                CCDData(np.ones((3, 5)) * 35., unit="adu"),
-               ]
-    ccdlist[0].data[0, 1] = 3.1
-    ccdlist[1].data[1, 2] = 100.1
-    ccdlist[1].data[2, 0] = 100.1
-    c = Combiner(ccdlist)
+               ]
+    ccdlist[0].data[0, 1] = 3.1
+    ccdlist[1].data[1, 2] = 100.1
+    ccdlist[1].data[2, 0] = 100.1
+    c = AverageCombiner(ccdlist)
     c.clip_extrema(nlow=1, nhigh=1)
-    result = c.average_combine()
+    result = c.combiner_method()
     expected = [[30.0, 22.5, 30.0, 30.0, 30.0],
                 [30.0, 30.0, 47.5, 30.0, 30.0],
                 [47.5, 30.0, 30.0, 30.0, 30.0]]
@@ -633,21 +646,21 @@ def test_clip_extrema_with_other_rejection():
                CCDData(np.ones((3, 5)) * 40., unit="adu"),
                CCDData(np.ones((3, 5)) * 25., unit="adu"),
                CCDData(np.ones((3, 5)) * 35., unit="adu"),
-               ]
+               ]
     ccdlist[0].data[0, 1] = 3.1
     ccdlist[1].data[1, 2] = 100.1
     ccdlist[1].data[2, 0] = 100.1
-    c = Combiner(ccdlist)
-    # Reject ccdlist[1].data[1,2] by other means
+    c = AverageCombiner(ccdlist)
+    # Reject ccdlist[1].data[1, 2] by other means
     c.data_arr.mask[1, 1, 2] = True
-    # Reject ccdlist[1].data[1,2] by other means
+    # Reject ccdlist[3].data[0, 0] by other means
     c.data_arr.mask[3, 0, 0] = True
 
     c.clip_extrema(nlow=1, nhigh=1)
-    result = c.average_combine()
-    expected = [[80. / 3., 22.5, 30., 30., 30.],
-                [30., 30., 47.5, 30., 30.],
-                [47.5, 30., 30., 30., 30.]]
+    result = c.combiner_method()
+    expected = [[80. / 3., 22.5, 30., 30., 30.],
+                [30., 30., 47.5, 30., 30.],
+                [47.5, 30., 30., 30., 30.]]
     np.testing.assert_array_equal(result, expected)
 

From 244b2ec494210e237a68c30ea815c24eb68783f8 Mon Sep 17 00:00:00 2001
From: Sara Ogaz
Date: Fri, 18 Jan 2019 16:11:02 -0500
Subject: [PATCH 2/2] Remove leftover blank line from moving code around

---
 ccdproc/combiner.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ccdproc/combiner.py b/ccdproc/combiner.py
index c9e8a2a1..44b36a36 100644
--- a/ccdproc/combiner.py
+++ b/ccdproc/combiner.py
@@ -563,7 +563,6 @@ def combiner_method(self, combine_func=ma.sum, scale_to=None):
 
         # set up the data
         data = combine_func(self._get_scaled_data(scale_to), axis=0)
-
         # set up the mask
         masked_values = self.data_arr.mask.sum(axis=0)
         mask = (masked_values == len(self.data_arr))
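
A minimal usage sketch of the refactored API introduced by this series (assuming this branch is installed as `ccdproc`; note the `use_input_uncertainties` path is still a stub, since `uncertainty_provided` returns `None`, so only the default path is exercised here):

```python
import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
from ccdproc import AverageCombiner, MedianCombiner, SumCombiner

# Three toy frames to combine.
images = [CCDData(np.full((4, 4), v), unit=u.adu) for v in (1.0, 0.0, 1.0)]

# Every subclass exposes the same entry point, combiner_method(), instead of
# the old average_combine()/median_combine()/sum_combine() trio.
avg = AverageCombiner(images).combiner_method()   # mean of the stack
med = MedianCombiner(images).combiner_method()    # median of the stack
total = SumCombiner(images).combiner_method()     # sum of the stack

# The uncertainty function is now a constructor argument rather than a
# keyword of the combine call (np.ma.std is the AverageCombiner default).
noisy = AverageCombiner(images, uncertainty_func=np.ma.std)
print(noisy.combiner_method().uncertainty.array.shape)  # (4, 4)
```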
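The base-class docstring muses about re-tooling so the shared pipeline is not repeated in every subclass, at the cost of the top-level combiner function no longer being the abstract method. For reference, here is one possible reading of that alternative as a template-method sketch; `CombinerSketch`, `MedianSketch`, and `_combine` are illustrative names, not part of this patch:

```python
from abc import ABC, abstractmethod

import numpy as np
from numpy import ma


class CombinerSketch(ABC):
    """Template-method variant: the shared combine/mask/uncertainty pipeline
    lives in the base class; only the small combine hook is abstract."""

    def __init__(self, data_arr, uncertainty_func=ma.std):
        self.data_arr = data_arr                  # masked (N, ny, nx) stack
        self.uncertainty_func = uncertainty_func

    def combiner_method(self):
        # Shared pipeline, written once instead of per subclass.
        data = self._combine(self.data_arr)
        masked_values = self.data_arr.mask.sum(axis=0)
        mask = (masked_values == len(self.data_arr))
        uncertainty = self.uncertainty_fresh(masked_values)
        return data, mask, uncertainty

    def uncertainty_fresh(self, masked_values):
        # Same correction as the patch: divide by sqrt of contributing images.
        uncertainty = self.uncertainty_func(self.data_arr, axis=0)
        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
        return np.asarray(uncertainty)

    @abstractmethod
    def _combine(self, data):
        """The only per-subclass piece: reduce the stack along axis 0."""


class MedianSketch(CombinerSketch):
    def _combine(self, data):
        return ma.median(data, axis=0)


# Example: combine a small random stack.
stack = ma.masked_invalid(np.random.default_rng(0).normal(size=(3, 4, 4)))
data, mask, sigma = MedianSketch(stack).combiner_method()
```

The trade-off is exactly the one the docstring names: less repetition, but a reader must follow the hooks to see what a given subclass actually does.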