diff --git a/Project.toml b/Project.toml
index cd910a8..bde999c 100644
--- a/Project.toml
+++ b/Project.toml
@@ -4,35 +4,42 @@ authors = ["Aman Pandey"]
 version = "0.1.0"
 
 [deps]
-FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
-Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
-ResumableFunctions = "c5292f4c-5179-55e1-98c5-05642aab7184"
-StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
-Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
-ProgressBars = "49802e3a-d2f1-5c88-81d8-b72133a6f568"
+AbstractFFTs = "621f4979-c628-5d54-868e-fcf4e3e8185c"
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
-Metadata = "4fb893c1-3164-4f58-823a-cb4c64eabb4f"
-HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
-NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
+Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
 FITSIO = "525bcba6-941b-5504-bd06-fd0dc1a4d2eb"
+HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
 Intervals = "d8418881-c3e1-53bb-8760-2df7ec849ed5"
+JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"
+NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
+Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
+ProgressBars = "49802e3a-d2f1-5c88-81d8-b72133a6f568"
+ResumableFunctions = "c5292f4c-5179-55e1-98c5-05642aab7184"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
+Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [compat]
-julia = "1.7"
-FFTW = "1.4"
-Distributions = "0.25"
-ResumableFunctions = "0.6"
-StatsBase = "0.33"
-ProgressBars = "1.4"
 DataFrames = "1.3"
-Metadata = "0.3"
-HDF5 = "0.16"
+Distributions = "0.25"
+FFTW = "1.4"
 FITSIO = "0.16"
+HDF5 = "0.16"
 Intervals = "1.8"
+JuliaFormatter = "1.0.62"
 NaNMath = "0.3, 1"
+Plots = "1.8"
+ProgressBars = "1.4"
+ResumableFunctions = "0.6"
+StatsBase = "0.33"
+Test = "1.7, 1.8, 1.9, 1.10, 1.11"
+julia = "1.11"
 
 [extras]
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+ResumableFunctions = "c5292f4c-5179-55e1-98c5-05642aab7184"
 
 [targets]
-test = ["Test"]
+test = ["Test", "ResumableFunctions"]
diff --git a/README.md b/README.md
index 21cd3f2..12c0eae 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,2 @@
 # Stingray.jl
-Julia porting of Stingray
+Julia porting of Stingray
\ No newline at end of file
diff --git a/src/Stingray.jl b/src/Stingray.jl
index 11746c0..a6ff22b 100644
--- a/src/Stingray.jl
+++ b/src/Stingray.jl
@@ -1,7 +1,7 @@
 module Stingray
 
 using ResumableFunctions, StatsBase, Statistics, DataFrames
-using FFTW, Metadata, NaNMath, FITSIO, Intervals
+using FFTW, NaNMath, FITSIO, Intervals
 using ProgressBars: tqdm as show_progress
 
 include("fourier.jl")
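Note: with Metadata.jl dropped from the dependencies above, spectrum-producing functions no longer use attach_metadata; they hand the metadata back explicitly (see the fourier.jl changes below). A minimal sketch of the new calling convention, assuming flux_iterable1/flux_iterable2 come from get_flux_iterable_from_segments; variable names are illustrative, not part of the API:

    using DataFrames
    results, metadata = avg_cs_from_iterables_quick(flux_iterable1, flux_iterable2, dt; norm = "frac")
    results isa DataFrame   # columns: freq, power, unnorm_power
    metadata.m              # number of averaged segments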
diff --git a/src/fourier.jl b/src/fourier.jl
index d5f2e62..4b6b47a 100644
--- a/src/fourier.jl
+++ b/src/fourier.jl
@@ -195,7 +195,6 @@ function error_on_averaged_cross_spectrum(
     bsq = bias_term.(seg_power, ref_power, seg_power_noise, ref_power_noise, n_ave)
     frac = @. (Gsq - bsq) / (ref_power - ref_power_noise)
     power_over_2n = ref_power / two_n_ave
-    # Eq. 18
     dRe = dIm = dG = @. NaNMath.sqrt(power_over_2n * (seg_power - frac))  # Eq. 19
@@ -277,7 +276,7 @@ end
     dt = nothing
     binned = !isnothing(fluxes)
     if binned
-        dt = Statistics.median(diff(@view times[1:100]))
+        dt = Statistics.median(diff(@view times[1:min(100, end)]))
     end
 
     fun = _which_segment_idx_fun(; binned, dt)
@@ -286,19 +285,32 @@ end
             @yield nothing
             continue
         end
+
         if !binned
             event_times = @view times[idx0:idx1-1]
-            cts = fit(Histogram, float.(event_times .- s); nbins = n_bin).weights
+            cts =
+                fit(
+                    Histogram,
+                    float.(event_times .- s),
+                    range(0, segment_size, length = n_bin + 1),
+                ).weights
+            @yield NamedTuple{(:counts,)}((cts,))
+
         else
-            cts = float.(@view fluxes[idx0+1:idx1])
+            counts = float.(@view fluxes[idx0+1:idx1])
             if !isnothing(errors)
-                cts = cts, @view errors[idx0+1:idx1]
+                errors_slice = float.(@view errors[idx0+1:idx1])
+                if length(counts) == length(errors_slice)
+                    @yield NamedTuple{(:counts, :errors)}((counts, errors_slice))
+                else
+                    @yield nothing
+                end
+            else
+                @yield NamedTuple{(:counts,)}((counts,))
             end
         end
-        @yield cts
     end
 end
-
 function avg_pds_from_iterable(
     flux_iterable,
     dt::Real;
@@ -306,11 +318,7 @@ function avg_pds_from_iterable(
     use_common_mean::Bool = true,
     silent::Bool = false,
 )
-    local_show_progress = show_progress
-    if silent
-        local_show_progress = identity
-    end
-
+    local_show_progress = silent ? identity : show_progress
     # Initialize stuff
     cross = unnorm_cross = nothing
     n_ave = 0
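Note: the segment iterator above now yields NamedTuples instead of bare arrays or (counts, errors) tuples. A small sketch of the two shapes consumers should expect (values are made up):

    seg = (counts = [1.0, 2.0, 0.0, 3.0],)                # event data, no errors
    seg_err = (counts = [1.0, 2.0], errors = [0.1, 0.2])  # binned data with errors
    hasproperty(seg_err, :errors)                         # true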
@@ -321,16 +332,20 @@ function avg_pds_from_iterable(
     fgt0 = nothing
     n_bin = nothing
 
-    for flux in local_show_progress(flux_iterable)
-        if isnothing(flux) || all(iszero, flux)
+    for flux_tuple in local_show_progress(flux_iterable)
+        if isnothing(flux_tuple) || all(iszero, flux_tuple.counts)
+            continue
         end
 
         # If the iterable returns the uncertainty, use it to calculate the
         # variance.
+        flux = flux_tuple.counts
         variance = nothing
-        if flux isa Tuple
-            flux, err = flux
+
+        if hasproperty(flux_tuple, :errors)
+            err = flux_tuple.errors
+
             variance = Statistics.mean(err)^2
         end
@@ -346,7 +361,6 @@ function avg_pds_from_iterable(
         # Accumulate the sum of means and variances, to get the final mean and
         # variance the end
         sum_of_photons += n_ph
-
         if !isnothing(variance)
             common_variance = sum_if_not_none_or_initialize(common_variance, variance)
         end
@@ -356,18 +370,16 @@ function avg_pds_from_iterable(
             fgt0 = positive_fft_bins(n_bin)
             freq = fftfreq(n_bin, dt)[fgt0]
         end
-        # No need for the negative frequencies
-        keepat!(unnorm_power, fgt0)
+        unnorm_power_filtered = unnorm_power[fgt0]
 
         # If the user wants to normalize using the mean of the total
         # lightcurve, normalize it here
-        cs_seg = unnorm_power
+        cs_seg = unnorm_power_filtered
         if !(use_common_mean)
             mean = n_ph / n_bin
-
             cs_seg = normalize_periodograms(
-                unnorm_power,
+                unnorm_power_filtered,
                 dt,
                 n_bin;
                 mean_flux = mean,
@@ -376,19 +388,17 @@ function avg_pds_from_iterable(
                 norm = norm,
                 variance = variance,
             )
         end
-
         # Accumulate the total sum cross spectrum
         cross = sum_if_not_none_or_initialize(cross, cs_seg)
-        unnorm_cross = sum_if_not_none_or_initialize(unnorm_cross, unnorm_power)
+
+        unnorm_cross = sum_if_not_none_or_initialize(unnorm_cross, unnorm_power[fgt0])
 
         n_ave += 1
     end
-
     # If there were no good intervals, return nothing
     if isnothing(cross)
         return nothing
     end
-
     # Calculate the mean number of photons per chunk
     n_ph = sum_of_photons / n_ave
     # Calculate the mean number of photons per bin
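Note: keepat!(unnorm_power, fgt0) mutated the full spectrum, so it could not be reused afterwards; the hunk above switches to an indexed copy. Illustrative standalone sketch (the exact shape of positive_fft_bins output is assumed here):

    using FFTW
    n_bin = 8
    unnorm_power = abs2.(fft(rand(n_bin)))
    fgt0 = 2:(n_bin ÷ 2)           # assumed: positive, non-DC frequency bins
    filtered = unnorm_power[fgt0]  # copy; unnorm_power is left intact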
@@ -399,12 +409,11 @@ function avg_pds_from_iterable(
         # Hence M, not M*n_bin
         common_variance /= n_ave
     end
-    # Transform a sum into the average
-    unnorm_cross = unnorm_cross / n_ave
-    cross = cross / n_ave
-
-    # Final normalization (If not done already!)
+    unnorm_cross ./= n_ave
+    cross ./= n_ave
+    # Finally, normalize the cross spectrum (only if not already done on an
+    # interval-to-interval basis)
     if use_common_mean
         cross = normalize_periodograms(
             unnorm_cross,
@@ -418,23 +427,10 @@ function avg_pds_from_iterable(
     end
 
     results = DataFrame()
-    results[!, "freq"] = freq
-    results[!, "power"] = cross
-    results[!, "unnorm_power"] = unnorm_cross
-    results = attach_metadata(
-        results,
-        (
-            n = n_bin,
-            m = n_ave,
-            dt = dt,
-            norm = norm,
-            df = 1 / (dt * n_bin),
-            nphots = n_ph,
-            mean = common_mean,
-            variance = common_variance,
-            segment_size = dt * n_bin,
-        ),
-    )
+    results.freq = freq
+    results.power = cross
+    results.unnorm_power = unnorm_cross
+
     return results
 end
@@ -448,20 +444,18 @@ function avg_cs_from_iterables_quick(
     unnorm_cross = unnorm_pds1 = unnorm_pds2 = nothing
     n_ave = 0
     fgt0 = n_bin = freq = nothing
-
     sum_of_photons1 = sum_of_photons2 = 0
-
     for (flux1, flux2) in zip(flux_iterable1, flux_iterable2)
-        if isnothing(flux1) || isnothing(flux2) || all(iszero, flux1) || all(iszero, flux2)
+        if isnothing(flux1) ||
+           isnothing(flux2) ||
+           all(iszero, flux1.counts) ||
+           all(iszero, flux2.counts)
             continue
         end
-
-        n_bin = length(flux1)
-
+        n_bin = length(flux1.counts)
         # Calculate the sum of each light curve, to calculate the mean
-        n_ph1 = sum(flux1)
-        n_ph2 = sum(flux2)
-
+        n_ph1 = sum(flux1.counts)
+        n_ph2 = sum(flux2.counts)
         # At the first loop, we define the frequency array and the range of
         # positive frequency bins (after the first loop, cross will not be
         # nothing anymore)
@@ -469,32 +463,24 @@ function avg_cs_from_iterables_quick(
             freq = fftfreq(n_bin, dt)
             fgt0 = positive_fft_bins(n_bin)
         end
-
         # Calculate the FFTs
-        ft1 = fft(flux1)
-        ft2 = fft(flux2)
-
+        ft1 = fft(flux1.counts)
+        ft2 = fft(flux2.counts)
         # Calculate the unnormalized cross spectrum
         unnorm_power = ft1 .* conj.(ft2)
-
         # Accumulate the sum to calculate the total mean of the lc
         sum_of_photons1 += n_ph1
         sum_of_photons2 += n_ph2
-
         # Take only positive frequencies
         keepat!(unnorm_power, fgt0)
-
         # Initialize or accumulate final averaged spectrum
         unnorm_cross = sum_if_not_none_or_initialize(unnorm_cross, unnorm_power)
-
         n_ave += 1
     end
-
     # If no valid intervals were found, return only `nothing`s
     if isnothing(unnorm_cross)
         return nothing
     end
-
     # Calculate the mean number of photons per chunk
     n_ph1 = sum_of_photons1 / n_ave
     n_ph2 = sum_of_photons2 / n_ave
@@ -503,10 +489,8 @@ function avg_cs_from_iterables_quick(
     common_mean1 = n_ph1 / n_bin
     common_mean2 = n_ph2 / n_bin
     common_mean = n_ph / n_bin
-
     # Transform the sums into averages
    unnorm_cross ./= n_ave
-
     # Finally, normalize the cross spectrum (only if not already done on an
     # interval-to-interval basis)
     cross = normalize_periodograms(
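Note: the per-segment quantity averaged throughout this function is the unnormalized cross spectrum, i.e. the product of one FFT with the conjugate of the other. Minimal standalone sketch with toy data:

    using FFTW
    flux1, flux2 = rand(64), rand(64)
    unnorm_power = fft(flux1) .* conj.(fft(flux2))  # complex-valued cross spectrum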
@@ -519,36 +503,36 @@ function avg_cs_from_iterables_quick(
         variance = nothing,
         power_type = "all",
     )
-
     # No negative frequencies
     freq = freq[fgt0]
 
+    # Create DataFrame with the computed data
     results = DataFrame()
     results[!, "freq"] = freq
     results[!, "power"] = cross
     results[!, "unnorm_power"] = unnorm_cross
-    results = attach_metadata(
-        results,
-        (
-            n = n_bin,
-            m = n_ave,
-            dt = dt,
-            norm = norm,
-            df = 1 / (dt * n_bin),
-            nphots = n_ph,
-            nphots1 = n_ph1,
-            nphots2 = n_ph2,
-            variance = nothing,
-            mean = common_mean,
-            mean1 = common_mean1,
-            mean2 = common_mean2,
-            power_type = "all",
-            fullspec = false,
-            segment_size = dt * n_bin,
-        ),
+
+    # Create NamedTuple with metadata instead of using attach_metadata
+    metadata = (
+        n = n_bin,
+        m = n_ave,
+        dt = dt,
+        norm = norm,
+        df = 1 / (dt * n_bin),
+        nphots = n_ph,
+        nphots1 = n_ph1,
+        nphots2 = n_ph2,
+        variance = nothing,
+        mean = common_mean,
+        mean1 = common_mean1,
+        mean2 = common_mean2,
+        power_type = "all",
+        fullspec = false,
+        segment_size = dt * n_bin,
     )
-    return results
+    # Return both DataFrame and metadata as a tuple
+    return (results, metadata)
 end
 
 function avg_cs_from_iterables(
@@ -563,7 +547,4 @@ function avg_cs_from_iterables(
     return_auxil::Bool = false,
 )
 
-    local_show_progress = show_progress
-    if silent
-        local_show_progress = (a) -> a
-    end
+    local_show_progress = silent ? identity : show_progress
@@ -576,19 +560,23 @@ function avg_cs_from_iterables(
     common_variance1 = common_variance2 = common_variance = nothing
 
     for (flux1, flux2) in local_show_progress(zip(flux_iterable1, flux_iterable2))
-        if isnothing(flux1) || isnothing(flux2) || all(iszero, flux1) || all(iszero, flux2)
+        if isnothing(flux1) ||
+           isnothing(flux2) ||
+           all(iszero, flux1.counts) ||
+           all(iszero, flux2.counts)
+
             continue
         end
 
         # Does the flux iterable return the uncertainty?
         # If so, define the variances
         variance1 = variance2 = nothing
-        if flux1 isa Tuple
-            flux1, err1 = flux1
+        if hasproperty(flux1, :errors)
+            err1 = flux1.errors
             variance1 = Statistics.mean(err1)^2
         end
-        if flux2 isa Tuple
-            flux2, err2 = flux2
+        if hasproperty(flux2, :errors)
+            err2 = flux2.errors
             variance2 = Statistics.mean(err2)^2
         end
@@ -600,7 +588,7 @@ function avg_cs_from_iterables(
             common_variance2 = sum_if_not_none_or_initialize(common_variance2, variance2)
         end
 
-        n_bin = length(flux1)
+        n_bin = length(flux1.counts)
 
         # At the first loop, we define the frequency array and the range of
         # positive frequency bins (after the first loop, cross will not be
@@ -611,12 +599,12 @@ function avg_cs_from_iterables(
         end
 
         # Calculate the FFTs
-        ft1 = fft(flux1)
-        ft2 = fft(flux2)
+        ft1 = fft(flux1.counts)
+        ft2 = fft(flux2.counts)
 
         # Calculate the sum of each light curve, to calculate the mean
-        n_ph1 = sum(flux1)
-        n_ph2 = sum(flux2)
+        n_ph1 = sum(flux1.counts)
+        n_ph2 = sum(flux2.counts)
         n_ph = sqrt(n_ph1 * n_ph2)
 
         # Calculate the unnormalized cross spectrum
@@ -737,8 +725,7 @@ function avg_cs_from_iterables(
         unnorm_pds2 ./= n_ave
     end
 
-    # Finally, normalize the cross spectrum (only if not already done on an
-    # interval-to-interval basis)
+    # Finally, normalize the cross spectrum (only if not already done on an interval-to-interval basis)
     if use_common_mean
         cross = normalize_periodograms(
             unnorm_cross,
@@ -778,6 +765,7 @@ function avg_cs_from_iterables(
         freq = freq[fgt0]
     end
 
+    # Create DataFrame with the computed data
     results = DataFrame()
     results[!, "freq"] = freq
     results[!, "power"] = cross
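Note: the variance handling above derives a per-segment variance from the mean of the supplied errors. Standalone sketch of that rule with a toy NamedTuple:

    using Statistics
    seg = (counts = rand(16), errors = fill(0.1, 16))
    variance = hasproperty(seg, :errors) ? Statistics.mean(seg.errors)^2 : nothing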
@@ -790,33 +778,31 @@ function avg_cs_from_iterables(
     results[!, "unnorm_power"] = unnorm_cross
     if return_auxil
         results[!, "pds1"] = pds1
         results[!, "pds2"] = pds2
         results[!, "unnorm_pds1"] = unnorm_pds1
         results[!, "unnorm_pds2"] = unnorm_pds2
     end
 
-    results = attach_metadata(
-        results,
-        (
-            n = n_bin,
-            m = n_ave,
-            dt = dt,
-            norm = norm,
-            df = 1 / (dt * n_bin),
-            segment_size = dt * n_bin,
-            nphots = n_ph,
-            nphots1 = n_ph1,
-            nphots2 = n_ph2,
-            countrate1 = common_mean1 / dt,
-            countrate2 = common_mean2 / dt,
-            mean = common_mean,
-            mean1 = common_mean1,
-            mean2 = common_mean2,
-            power_type = power_type,
-            fullspec = fullspec,
-            variance = common_variance,
-            variance1 = common_variance1,
-            variance2 = common_variance2,
-        ),
+    # Create NamedTuple with metadata instead of using attach_metadata
+    metadata = (
+        n = n_bin,
+        m = n_ave,
+        dt = dt,
+        norm = norm,
+        df = 1 / (dt * n_bin),
+        segment_size = dt * n_bin,
+        nphots = n_ph,
+        nphots1 = n_ph1,
+        nphots2 = n_ph2,
+        countrate1 = common_mean1 / dt,
+        countrate2 = common_mean2 / dt,
+        mean = common_mean,
+        mean1 = common_mean1,
+        mean2 = common_mean2,
+        power_type = power_type,
+        fullspec = fullspec,
+        variance = common_variance,
+        variance1 = common_variance1,
+        variance2 = common_variance2,
     )
-    return results
-
+    # Return both DataFrame and metadata as a tuple
+    return (results, metadata)
 end
 
 function avg_pds_from_events(
@@ -830,8 +816,9 @@ function avg_pds_from_events(
     fluxes = nothing,
     errors = nothing,
 )
+
     if isnothing(segment_size)
-        segment_size = max(gti) - min(gti)
+        segment_size = maximum(gti) - minimum(gti)
     end
     n_bin = round(Int, segment_size / dt)
     dt = segment_size / n_bin
@@ -840,21 +827,22 @@ function avg_pds_from_events(
     flux_iterable = get_flux_iterable_from_segments(
         times,
         gti,
         segment_size;
-        n_bin,
+        n_bin = n_bin,
         fluxes = fluxes,
         errors = errors,
     )
-    cross = avg_pds_from_iterable(
+    result = avg_pds_from_iterable(
         flux_iterable,
         dt,
         norm = norm,
         use_common_mean = use_common_mean,
         silent = silent,
     )
-    if !isnothing(cross)
-        attach_metadata(cross, (gti = gti,))
+    if isnothing(result)
+        return nothing
     end
-    return cross
+
+    return DataFrame(result)
 end
@@ -875,18 +863,18 @@ function avg_cs_from_events(
     errors2 = nothing,
     return_auxil = false,
 )
+
     if isnothing(segment_size)
-        segment_size = max(gti) - min(gti)
+        segment_size = maximum(gti) - minimum(gti)
     end
     n_bin = round(Int, segment_size / dt)
-    # adjust dt
     dt = segment_size / n_bin
 
     flux_iterable1 = get_flux_iterable_from_segments(
         times1,
         gti,
         segment_size;
-        n_bin,
+        n_bin = n_bin,
         fluxes = fluxes1,
         errors = errors1,
     )
@@ -894,7 +882,7 @@ function avg_cs_from_events(
         times2,
         gti,
         segment_size;
-        n_bin,
+        n_bin = n_bin,
         fluxes = fluxes2,
         errors = errors2,
     )
@@ -911,7 +899,6 @@ function avg_cs_from_events(
         )
 
         results = avg_cs_from_iterables_quick(flux_iterable1, flux_iterable2, dt; norm = norm)
-
     else
         results = avg_cs_from_iterables(
             flux_iterable1,
@@ -925,8 +912,8 @@ function avg_cs_from_events(
             return_auxil = return_auxil,
         )
     end
-    if !isnothing(results)
-        attach_metadata(results, (gti = gti,))
+    if isnothing(results)
+        return nothing
     end
-    return results
+    return results[1]
 end
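Note: avg_cs_from_events now unwraps the (results, metadata) tuple and returns only the DataFrame (results[1]), so the GTI metadata previously attached is no longer available from this call. Hypothetical usage sketch (times1, times2, gti, segment_size, dt are assumed inputs):

    df = avg_cs_from_events(times1, times2, gti, segment_size, dt; norm = "frac", silent = true)
    isnothing(df) || df.power  # normalized powers, or nothing for bad input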
diff --git a/src/utils.jl b/src/utils.jl
index 285d493..44df168 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -1,8 +1,23 @@
-function sum_if_not_none_or_initialize(A, B)
-    if isnothing(A)
-        return deepcopy((B))
+# Accumulator helper: initializes from the first non-nothing value, then adds
+# arrays element-wise and numbers directly, erroring on mixed types.
+function sum_if_not_none_or_initialize(current, new_value)
+    if isnothing(new_value)
+        return current  # If new_value is nothing, just return current
+    end
+
+    if isnothing(current)
+        return copy(new_value)  # Initialize if current is nothing
+    end
+
+    if isa(current, AbstractArray) && isa(new_value, AbstractArray)
+        return current .+ new_value  # Element-wise addition for arrays
+    elseif isa(current, Number) && isa(new_value, Number)
+        return current + new_value  # Simple number addition
+    else
+        error(
+            "sum_if_not_none_or_initialize: Type mismatch between current=$(typeof(current)) and new_value=$(typeof(new_value))",
+        )
     end
-    return A + B
 end
 
 function contiguous_regions(condition::AbstractVector{Bool})
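Note: illustrative behavior of the rewritten accumulator (values made up):

    acc = nothing
    acc = sum_if_not_none_or_initialize(acc, [1.0, 2.0])  # -> [1.0, 2.0] (copy)
    acc = sum_if_not_none_or_initialize(acc, [0.5, 0.5])  # -> [1.5, 2.5]
    acc = sum_if_not_none_or_initialize(acc, nothing)     # -> [1.5, 2.5] (unchanged)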
diff --git a/test/runtests.jl b/test/runtests.jl
index a4460f4..47e04f0 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,6 +1,7 @@
 using Stingray
 using Test
-using FFTW, Distributions, Statistics, StatsBase, Metadata, HDF5
+using FFTW, Distributions, Statistics, StatsBase, HDF5
+using DataFrames
 
 include("test_fourier.jl")
 include("test_gti.jl")
diff --git a/test/test_fourier.jl b/test/test_fourier.jl
index e15ada1..3295d17 100644
--- a/test/test_fourier.jl
+++ b/test/test_fourier.jl
@@ -1,45 +1,34 @@
-function compare_tables(table1, table2; rtol = 0.001, discard = [])
-
-    s_discard = Symbol.(discard)
+function compare_tables(
+    table1::DataFrame,
+    table2::DataFrame;
+    rtol = 0.001,
+    discard = Vector{Symbol}(),
+)
+    s_discard = Set(Symbol.(discard))
     test_result = true
-    for key in propertynames(table1)
-        if key in s_discard
-            continue
-        end
-        oe, oc = getproperty(table1, key), getproperty(table1, key)
-        if oe isa Integer || oe isa String
-            if !(oe == oc)
-                test_result = false
-                break
-            end
-        elseif isnothing(oe)
-            if !(isnothing(oc))
+    # Ensure both DataFrames have the same columns before comparison
+    common_columns = intersect(propertynames(table1), propertynames(table2))
+    common_columns = setdiff(common_columns, s_discard)  # Remove discarded columns
+
+    for key in common_columns
+        oe, oc = getproperty(table1, key), getproperty(table2, key)
+
+        if eltype(oe) <: Number && eltype(oc) <: Number
+            if !(≈(oe, oc, rtol = rtol))
                 test_result = false
+                @warn "Mismatch in column $key: Maximum difference $(maximum(abs.(oe .- oc)))"
                 break
             end
         else
-            if !(≈(oe, oc, rtol = rtol))
+            if !(oe == oc)
                 test_result = false
+                @warn "Mismatch in column $key: Values do not match"
                 break
             end
         end
     end
-    table1 = Metadata.drop_metadata(table1)
-    table2 = Metadata.drop_metadata(table2)
-
-    for field in names(table1)
-        if field in discard
-            continue
-        end
-        oe, oc = table1[!, field], table2[!, field]
-
-        if !(≈(oe, oc, rtol = rtol))
-            test_result = false
-            break
-        end
-    end
     @test test_result
 end
@@ -168,17 +157,18 @@ end
         @test fts_evts == fts_cts
     end
 
-    @testset "test_error_on_averaged_cross_spectrum_low_nave" for common_ref in
-                                                                  [true, false]
-        @test_logs (:warn, r"n_ave is below 30.") error_on_averaged_cross_spectrum(
-            [4 + 1.0im],
-            [2],
-            [4],
-            29,
-            2,
-            2;
-            common_ref = common_ref,
-        )
+    @testset "test_error_on_averaged_cross_spectrum_low_nave" begin
+        for common_ref in [true, false]
+            @test_logs (:warn, r"n_ave is below 30.") error_on_averaged_cross_spectrum(
+                [4 + 1.0im],
+                [2],
+                [4],
+                29,
+                2,
+                2;
+                common_ref = common_ref,
+            )
+        end
     end
@@ -187,29 +177,26 @@ end
     @testset "test_avg_pds_bad_input" begin
         _times = rand(Uniform(0, 1000), 1)
         out_ev = avg_pds_from_events(_times, gti, segment_size, dt, silent = true)
         @test isnothing(out_ev)
     end
 
-    @testset "test_avg_cs_bad_input" for return_auxil in [true, false]
-        _times1 = rand(Uniform(0, 1000), 1)
-        _times2 = rand(Uniform(0, 1000), 1)
-        out_ev = avg_cs_from_events(
-            _times1,
-            _times2,
-            gti,
-            segment_size,
-            dt,
-            silent = true,
-            return_auxil = return_auxil,
-        )
-        @test isnothing(out_ev)
+    @testset "test_avg_cs_bad_input" begin
+        for return_auxil in [true, false]
+            _times1 = rand(Uniform(0, 1000), 1)
+            _times2 = rand(Uniform(0, 1000), 1)
+            out_ev = avg_cs_from_events(
+                _times1,
+                _times2,
+                gti,
+                segment_size,
+                dt,
+                silent = true,
+                return_auxil = return_auxil,
+            )
+            @test isnothing(out_ev)
+        end
     end
 
-    @testset "test_avg_pds_use_common_mean_similar_stats" for norm in [
-        "frac",
-        "abs",
-        "none",
-        "leahy",
-    ]
-        out_comm =
-            avg_pds_from_events(
+    @testset "test_avg_pds_use_common_mean_similar_stats" begin
+        for norm in ["frac", "abs", "none", "leahy"]
+            out_comm = avg_pds_from_events(
                 times,
                 gti,
                 segment_size,
@@ -218,9 +205,9 @@ end
                 dt,
                 norm = norm,
                 use_common_mean = true,
                 silent = true,
                 fluxes = nothing,
-            ).power
-        out =
-            avg_pds_from_events(
+            )
+
+            out = avg_pds_from_events(
                 times,
                 gti,
                 segment_size,
@@ -229,21 +216,19 @@ end
                 dt,
                 norm = norm,
                 use_common_mean = false,
                 silent = true,
                 fluxes = nothing,
-            ).power
-        @test std(out_comm) ≈ std(out) rtol = 0.1
+            )
+
+            @test !isnothing(out_comm) && !isnothing(out)
+            @test std(out_comm.power) ≈ std(out.power) rtol = 0.1
+        end
     end
 
-    @testset "test_avg_cs_use_common_mean_similar_stats" for norm in [
-        "frac",
-        "abs",
-        "none",
-        "leahy",
-    ],
-        return_auxil in [true, false],
-        fullspec in [true, false]
-
-        out_comm =
-            avg_cs_from_events(
+    @testset "test_avg_cs_use_common_mean_similar_stats" begin
+        for norm in ["frac", "abs", "none", "leahy"],
+            return_auxil in [true, false],
+            fullspec in [true, false]
+
+            out_comm = avg_cs_from_events(
                 times,
                 times2,
                 gti,
                 segment_size,
                 dt,
@@ -254,9 +239,11 @@ end
                 norm = norm,
                 use_common_mean = true,
                 silent = true,
                 fullspec = fullspec,
                 return_auxil = return_auxil,
-            ).power
-        out =
-            avg_cs_from_events(
+            )[
+                !,
+                :power,
+            ]
+            out = avg_cs_from_events(
                 times,
                 times2,
                 gti,
                 segment_size,
                 dt,
@@ -267,135 +254,132 @@ end
                 norm = norm,
                 use_common_mean = false,
                 silent = true,
                 fullspec = fullspec,
                 return_auxil = return_auxil,
-            ).power
-        @test std(out_comm) ≈ std(out) rtol = 0.1
+            )[
+                !,
+                :power,
+            ]
+            @test std(out_comm) ≈ std(out) rtol = 0.1
+        end
     end
 
-    @testset "test_avg_pds_cts_and_events_are_equal" for use_common_mean in [true, false],
-        norm in ["frac", "abs", "none", "leahy"]
-
-        out_ev = avg_pds_from_events(
-            times,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-            fluxes = nothing,
-        )
-        out_ct = avg_pds_from_events(
-            bin_times,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-            fluxes = counts,
-        )
-        compare_tables(out_ev, out_ct)
+    @testset "test_avg_pds_cts_and_events_are_equal" begin
+        for use_common_mean in [true, false], norm in ["frac", "abs", "none", "leahy"]
+            out_ev = avg_pds_from_events(
+                times,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+                fluxes = nothing,
+            )
+            out_ct = avg_pds_from_events(
+                bin_times,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+                fluxes = counts,
+            )
+            compare_tables(out_ev, out_ct)
+        end
     end
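Note: hypothetical call of the rewritten test helper above, ignoring a column that is not expected to agree:

    compare_tables(out_ev, out_ct, rtol = 0.01, discard = [:variance])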
-    @testset "test_avg_pds_cts_and_err_and_events_are_equal" for use_common_mean in
-                                                                 [true, false],
-        norm in ["frac", "abs", "none", "leahy"]
-
-        out_ev = avg_pds_from_events(
-            times,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-            fluxes = nothing,
-        )
-        out_ct = avg_pds_from_events(
-            bin_times,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-            fluxes = counts,
-            errors = errs,
-        )
-        # The variance is not _supposed_ to be equal, when we specify errors
-        if use_common_mean
-            compare_tables(out_ev, out_ct, rtol = 0.01, discard = ["variance"])
-        else
-            compare_tables(out_ev, out_ct, rtol = 0.1, discard = ["variance"])
+    @testset "test_avg_pds_cts_and_err_and_events_are_equal" begin
+        for use_common_mean in [true, false], norm in ["frac", "abs", "none", "leahy"]
+            out_ev = avg_pds_from_events(
+                times,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+                fluxes = nothing,
+            )
+            out_ct = avg_pds_from_events(
+                bin_times,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+                fluxes = counts,
+                errors = errs,
+            )
+            if use_common_mean
+                compare_tables(out_ev, out_ct, rtol = 0.01, discard = ["variance"])
+            else
+                compare_tables(out_ev, out_ct, rtol = 0.1, discard = ["variance"])
+            end
         end
     end
 
-    @testset "test_avg_cs_cts_and_events_are_equal" for use_common_mean in [true, false],
-        norm in ["frac", "abs", "none", "leahy"]
-
-        out_ev = avg_cs_from_events(
-            times,
-            times2,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-        )
-        out_ct = avg_cs_from_events(
-            bin_times,
-            bin_times,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-            fluxes1 = counts,
-            fluxes2 = counts2,
-        )
-        if use_common_mean
-            compare_tables(out_ev, out_ct, rtol = 0.01)
-        else
-            compare_tables(out_ev, out_ct, rtol = 0.1)
+    @testset "test_avg_cs_cts_and_events_are_equal" begin
+        for use_common_mean in [true, false], norm in ["frac", "abs", "none", "leahy"]
+            out_ev = avg_cs_from_events(
+                times,
+                times2,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+            )
+            out_ct = avg_cs_from_events(
+                bin_times,
+                bin_times,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+                fluxes1 = counts,
+                fluxes2 = counts2,
+            )
+            if use_common_mean
+                compare_tables(out_ev, out_ct, rtol = 0.01)
+            else
+                compare_tables(out_ev, out_ct, rtol = 0.1)
+            end
         end
     end
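Note: the hunks further down in this file replace keepat! with view when averaging normalized powers. The distinction, in a toy sketch:

    x = collect(1.0:8.0)
    v = view(x, 2:4)   # non-mutating window; x is untouched
    keepat!(x, 2:4)    # mutates x in place, leaving [2.0, 3.0, 4.0]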
-    @testset "test_avg_cs_cts_and_err_and_events_are_equal" for use_common_mean in
-                                                                [true, false],
-        norm in ["frac", "abs", "none", "leahy"]
-
-        out_ev = avg_cs_from_events(
-            times,
-            times2,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-        )
-        out_ct = avg_cs_from_events(
-            bin_times,
-            bin_times,
-            gti,
-            segment_size,
-            dt,
-            norm = norm,
-            use_common_mean = use_common_mean,
-            silent = true,
-            fluxes1 = counts,
-            fluxes2 = counts2,
-            errors1 = errs,
-            errors2 = errs2,
-        )
-        discard = [m for m in propertynames(out_ev) if m == :variance]
-
-        if use_common_mean
-            compare_tables(out_ev, out_ct, rtol = 0.01, discard = discard)
-        else
-            compare_tables(out_ev, out_ct, rtol = 0.1, discard = discard)
+    @testset "test_avg_cs_cts_and_err_and_events_are_equal" begin
+        for use_common_mean in [true, false], norm in ["frac", "abs", "none", "leahy"]
+            out_ev = avg_cs_from_events(
+                times,
+                times2,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+            )
+            out_ct = avg_cs_from_events(
+                bin_times,
+                bin_times,
+                gti,
+                segment_size,
+                dt,
+                norm = norm,
+                use_common_mean = use_common_mean,
+                silent = true,
+                fluxes1 = counts,
+                fluxes2 = counts2,
+                errors1 = errs,
+                errors2 = errs2,
+            )
+            discard = [m for m in propertynames(out_ev) if m == :variance]
+            if use_common_mean
+                compare_tables(out_ev, out_ct, rtol = 0.01, discard = discard)
+            else
+                compare_tables(out_ev, out_ct, rtol = 0.1, discard = discard)
+            end
         end
     end
 end
@@ -415,8 +399,8 @@ end
     pois_abs = poisson_level("abs", meanrate = meanrate)
     pois_frac = poisson_level("frac", meanrate = meanrate)
 
-    @test Statistics.mean(keepat!(pdsabs, good)) ≈ pois_abs rtol = 0.02
-    @test Statistics.mean(keepat!(pdsfrac, good)) ≈ pois_frac rtol = 0.02
+    @test Statistics.mean(view(pdsabs, good)) ≈ pois_abs rtol = 0.02
+    @test Statistics.mean(view(pdsfrac, good)) ≈ pois_frac rtol = 0.02
 
     mean = var = 100000.0
     N = 800000
@@ -427,13 +411,13 @@ end
     meanrate = mean / dt
     lc = rand(Poisson(mean), N)
     nph = sum(lc)
-    pds = keepat!(abs2.(fft(lc)), good)
+    pds = view(abs2.(fft(lc)), good)
     lc_bksub = lc .- mean
-    pds_bksub = keepat!(abs2.(fft(lc_bksub)), good)
+    pds_bksub = view(abs2.(fft(lc_bksub)), good)
     lc_renorm = lc / mean
-    pds_renorm = keepat!(abs2.(fft(lc_renorm)), good)
+    pds_renorm = view(abs2.(fft(lc_renorm)), good)
     lc_renorm_bksub = lc_renorm .- 1
-    pds_renorm_bksub = keepat!(abs2.(fft(lc_renorm_bksub)), good)
+    pds_renorm_bksub = view(abs2.(fft(lc_renorm_bksub)), good)
 
     @testset "test_leahy_bksub_var_vs_standard" begin
         leahyvar = normalize_leahy_from_variance(pds_bksub, Statistics.var(lc_bksub), N)
         leahy = 2 .* pds ./ nph
         ratio = Statistics.mean(leahyvar ./ leahy)
         @test ratio ≈ 1 rtol = 0.01
     end
+
     @testset "test_abs_bksub" begin
         ratio = normalize_abs(pds_bksub, dt, N) ./ normalize_abs(pds, dt, N)
         @test Statistics.mean(ratio) ≈ 1 rtol = 0.01
     end
+
     @testset "test_frac_renorm_constant" begin
         ratio = normalize_frac(pds_renorm, dt, N, 1) ./ normalize_frac(pds, dt, N, mean)
         @test Statistics.mean(ratio) ≈ 1 rtol = 0.01
     end
+
     @testset "test_frac_to_abs_ctratesq" begin
         ratio =
             (normalize_frac(pds, dt, N, mean) ./ normalize_abs(pds, dt, N) .*
              meanrate .^ 2)
         @test Statistics.mean(ratio) ≈ 1 rtol = 0.01
     end
+
     @testset "test_total_variance" begin
         vdk_total_variance = sum((lc .- mean) .^ 2)
         ratio = Statistics.mean(pds) / vdk_total_variance
         @test Statistics.mean(ratio) ≈ 1 rtol = 0.01
     end
+
     @testset "test_poisson_level_$(norm)" for norm in ["abs", "frac", "leahy", "none"]
         pdsnorm = normalize_periodograms(pds, dt, N; mean_flux = mean, n_ph = nph, norm = norm)