diff --git a/mkgu_packaging/dicarlo/marques/__init__.py b/mkgu_packaging/dicarlo/marques/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mkgu_packaging/dicarlo/marques/marques_cavanaugh2002a.py b/mkgu_packaging/dicarlo/marques/marques_cavanaugh2002a.py new file mode 100644 index 0000000..05488fd --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_cavanaugh2002a.py @@ -0,0 +1,82 @@ + +import numpy as np +from marques_utils import gen_sample +from brainio_collection.packaging import package_data_assembly +from brainio_base.assemblies import DataAssembly + +ASSEMBLY_NAME = 'movshon.Cavanaugh2002a' +SIZE_STIM_NAME = 'dicarlo.Marques2020_size' + +PROPERTY_NAMES = ['surround_suppression_index', 'strongly_suppressed', 'grating_summation_field', 'surround_diameter', + 'surround_grating_summation_field_ratio'] + + +def collect_data(): + n_neurons = 190 + + # Surround suppression index (n=190 neurons, foveal eccentricity, response > 5spk/s) + surround_suppression_index_bins = np.linspace(0, 2, num=11) + surround_suppression_index_hist = np.array([66, 44, 38, 23, 16, 3, 0, 0, 0, 0]) + surround_suppression_index = gen_sample(surround_suppression_index_hist, surround_suppression_index_bins, + scale='linear') + + # Strongly suppressed (n=190 neurons, foveal eccentricity, response > 5spk/s) + strongly_suppressed_bins = np.linspace(0, 1, num=3) + strongly_suppressed_hist = np.array([42, 148]) + strongly_suppressed = np.concatenate((np.zeros(strongly_suppressed_hist[0]), + np.ones(strongly_suppressed_hist[1]))).reshape((-1, 1)) + + # Grating summation field (n=148 neurons & suppression > 10%) + grating_summation_field_bins = np.array([0.0625, 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0]) + grating_summation_field_hist = np.array([0, 9, 30, 55, 42, 10, 2, 0]) + grating_summation_field = gen_sample(grating_summation_field_hist, grating_summation_field_bins, scale='log2') + filler = np.zeros((n_neurons - grating_summation_field_hist.sum(), 1)) + filler[:] = 
np.nan + grating_summation_field = np.concatenate((filler, grating_summation_field)) + + # Surround diameter (n=148 neurons & suppression > 10%) + surround_diameter_bins = np.array([0.0625, 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0]) + surround_diameter_hist = np.array([0, 0, 2, 20, 33, 49, 40, 4]) + surround_diameter = gen_sample(surround_diameter_hist, surround_diameter_bins, scale='log2') + filler = np.zeros((n_neurons - surround_diameter_hist.sum(), 1)) + filler[:] = np.nan + surround_diameter = np.concatenate((filler, surround_diameter)) + + # Surround diameter / Grating summation field (n=148 neurons, foveal eccentricity & suppression > 10%) + surround_grating_summation_field_ratio_bins = np.array([1, 2, 4, 8, 16, 32]) + surround_grating_summation_field_ratio_hist = np.array([69, 38, 31, 10, 0]) + surround_grating_summation_field_ratio = gen_sample(surround_grating_summation_field_ratio_hist, + surround_grating_summation_field_ratio_bins, scale='log2') + filler = np.zeros((n_neurons - surround_grating_summation_field_ratio_hist.sum(), 1)) + filler[:] = np.nan + surround_grating_summation_field_ratio = np.concatenate((filler, surround_grating_summation_field_ratio)) + + # Create DataAssembly with single neuronal properties and bin information + assembly = np.concatenate((surround_suppression_index, strongly_suppressed, grating_summation_field, + surround_diameter, surround_grating_summation_field_ratio), axis=1) + + assembly = DataAssembly(assembly, coords={'neuroid_id': ('neuroid', range(assembly.shape[0])), + 'region': ('neuroid', ['V1'] * assembly.shape[0]), + 'neuronal_property': PROPERTY_NAMES}, + dims=['neuroid', 'neuronal_property']) + + assembly.attrs['number_of_trials'] = 20 + + for p in assembly.coords['neuronal_property'].values: + assembly.attrs[p+'_bins'] = eval(p+'_bins') + + return assembly + + +def main(): + assembly = collect_data() + assembly.name = ASSEMBLY_NAME + print('Packaging assembly') + package_data_assembly(assembly, 
assembly_identifier=assembly.name, + stimulus_set_identifier=SIZE_STIM_NAME, assembly_class='PropertyAssembly', + bucket_name='brainio.contrib') + + +if __name__ == '__main__': + main() + diff --git a/mkgu_packaging/dicarlo/marques/marques_devalois1982a.py b/mkgu_packaging/dicarlo/marques/marques_devalois1982a.py new file mode 100644 index 0000000..fea27bc --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_devalois1982a.py @@ -0,0 +1,43 @@ + +import numpy as np +from marques_utils import gen_sample +from brainio_collection.packaging import package_data_assembly +from brainio_base.assemblies import DataAssembly + +ASSEMBLY_NAME = 'devalois.DeValois1982a' +ORIENTATION_STIM_NAME = 'dicarlo.Marques2020_orientation' + + +def collect_data(): + # Preferred orientation data + preferred_orientation_hist = np.array([110, 83, 100, 92]) + preferred_orientation_bins = np.linspace(-22.5, 157.5, 5) + + preferred_orientation = gen_sample(preferred_orientation_hist, preferred_orientation_bins, scale='linear') + + # Create DataAssembly with single neuronal properties and bin information + assembly = DataAssembly(preferred_orientation, coords={'neuroid_id': ('neuroid', + range(preferred_orientation.shape[0])), + 'region': ('neuroid', ['V1'] * preferred_orientation.shape[0]), + 'neuronal_property': ['preferred_orientation']}, + dims=['neuroid', 'neuronal_property']) + + assembly.attrs['number_of_trials'] = 20 + + for p in assembly.coords['neuronal_property'].values: + assembly.attrs[p+'_bins'] = eval(p+'_bins') + + return assembly + + +def main(): + assembly = collect_data() + assembly.name = ASSEMBLY_NAME + print('Packaging assembly') + package_data_assembly(assembly, assembly_identifier=assembly.name, stimulus_set_identifier=ORIENTATION_STIM_NAME, + assembly_class='PropertyAssembly', bucket_name='brainio.contrib') + + +if __name__ == '__main__': + main() + diff --git a/mkgu_packaging/dicarlo/marques/marques_devalois1982b.py 
b/mkgu_packaging/dicarlo/marques/marques_devalois1982b.py new file mode 100644 index 0000000..58f6221 --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_devalois1982b.py @@ -0,0 +1,58 @@ + +import numpy as np +from marques_utils import gen_sample +from brainio_collection.packaging import package_data_assembly +from brainio_base.assemblies import DataAssembly + +ASSEMBLY_NAME = 'devalois.DeValois1982b' +SPATIAL_FREQUENCY_STIM_NAME = 'dicarlo.Marques2020_spatial_frequency' + + +def collect_data(): + # Peak spatial frequency data + peak_spatial_frequency_bins = np.array([0.35, 0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8.0, 11.2, 16.0, 22.4]) + # peak_spatial_frequency_simple_hist = np.array([0, 4, 4, 8, 25, 33, 26, 28, 12, 5, 2, 1]) + # peak_spatial_frequency_complex_hist = np.array([0, 0, 0, 9, 9, 7, 10, 23, 12, 8, 3, 3]) + # peak_spatial_frequency_hist = (peak_spatial_frequency_simple_hist + peak_spatial_frequency_complex_hist) + # + # peak_spatial_frequency = gen_sample(peak_spatial_frequency_hist, peak_spatial_frequency_bins, scale='log2') + + peak_spatial_frequency_simple_foveal_hist = np.array([0, 4, 4, 8, 25, 33, 26, 28, 12, 5, 2, 1]) + peak_spatial_frequency_complex_foveal_hist = np.array([0, 0, 0, 9, 9, 7, 10, 23, 12, 8, 3, 3]) + peak_spatial_frequency_simple_parafoveal_hist = np.array([2, 4, 10, 12, 18, 7, 18, 3, 4, 0, 0, 0]) + peak_spatial_frequency_complex_parafoveal_hist = np.array([1, 2, 1, 2, 5, 15, 13, 9, 3, 2, 0, 0]) + + peak_spatial_frequency_hist = ( + peak_spatial_frequency_simple_foveal_hist + peak_spatial_frequency_complex_foveal_hist + + peak_spatial_frequency_simple_parafoveal_hist + peak_spatial_frequency_complex_parafoveal_hist) + + peak_spatial_frequency = gen_sample(peak_spatial_frequency_hist, peak_spatial_frequency_bins, scale='log2') + + # Create DataAssembly with single neuronal properties and bin information + assembly = DataAssembly(peak_spatial_frequency, coords={'neuroid_id': ('neuroid', + 
range(peak_spatial_frequency.shape[0])), + 'region': ('neuroid', ['V1'] * + peak_spatial_frequency.shape[0]), + 'neuronal_property': ['peak_spatial_frequency']}, + dims=['neuroid', 'neuronal_property']) + + assembly.attrs['number_of_trials'] = 20 + + for p in assembly.coords['neuronal_property'].values: + assembly.attrs[p+'_bins'] = eval(p+'_bins') + + return assembly + + +def main(): + assembly = collect_data() + assembly.name = ASSEMBLY_NAME + print('Packaging assembly') + package_data_assembly(assembly, assembly_identifier=assembly.name, + stimulus_set_identifier=SPATIAL_FREQUENCY_STIM_NAME, assembly_class='PropertyAssembly', + bucket_name='brainio.contrib') + + +if __name__ == '__main__': + main() + diff --git a/mkgu_packaging/dicarlo/marques/marques_freemanZiemba2013.py b/mkgu_packaging/dicarlo/marques/marques_freemanZiemba2013.py new file mode 100644 index 0000000..f8d8850 --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_freemanZiemba2013.py @@ -0,0 +1,251 @@ + +import numpy as np +from brainio_collection.packaging import package_data_assembly +import h5py +from brainio_base.assemblies import DataAssembly + +DATA_DIR = '/braintree/data2/active/users/tmarques/bs_datasets/FreemanZiemba2013_V1V2data.mat' +ASSEMBLY_NAME = 'movshon.FreemanZiemba2013_V1_properties' +TEXTURE_STIM_NAME = 'movshon.FreemanZiemba2013_properties' + +PROPERTY_NAMES = ['texture_modulation_index', 'absolute_texture_modulation_index', 'texture_selectivity', + 'noise_selectivity', 'texture_sparseness', 'noise_sparseness', 'variance_ratio', 'sample_variance', + 'family_variance', 'max_texture', 'max_noise', ] + + +def collect_data(data_dir): + response_file = h5py.File(data_dir, 'r') + + texture_modulation_index, absolute_texture_modulation_index, texture_selectivity, noise_selectivity, \ + texture_sparseness, noise_sparseness, variance_ratio, sample_variance, family_variance, max_texture, max_noise = \ + calculate_texture_properties(response_file, area='v1') + + # Bins + 
max_texture_bins = np.logspace(-1, 3, 13, base=10) + max_noise_bins = np.logspace(-1, 3, 13, base=10) + texture_modulation_index_bins = np.linspace(-1, 1, num=25) + absolute_texture_modulation_index_bins = np.linspace(0, 1, num=13) + texture_selectivity_bins = np.linspace(0, 1, num=11) + noise_selectivity_bins = np.linspace(0, 1, num=11) + texture_sparseness_bins = np.linspace(0, 1, num=11) + noise_sparseness_bins = np.linspace(0, 1, num=11) + variance_ratio_bins = np.logspace(-2, 1, num=13) + sample_variance_bins = np.linspace(0, 1, num=11) + family_variance_bins = np.linspace(0, 1, num=11) + + # Create DataAssembly with single neuronal properties and bin information + assembly = np.concatenate((texture_modulation_index, absolute_texture_modulation_index, texture_selectivity, + noise_selectivity, texture_sparseness, noise_sparseness, variance_ratio, sample_variance, + family_variance, max_texture, max_noise), axis=1) + + assembly = DataAssembly(assembly, coords={'neuroid_id': ('neuroid', range(assembly.shape[0])), + 'region': ('neuroid', ['V1'] * assembly.shape[0]), + 'neuronal_property': PROPERTY_NAMES}, + dims=['neuroid', 'neuronal_property']) + + assembly.attrs['number_of_trials'] = 20 + + for p in assembly.coords['neuronal_property'].values: + assembly.attrs[p+'_bins'] = eval(p+'_bins') + + return assembly + + +def main(): + assembly = collect_data(DATA_DIR) + assembly.name = ASSEMBLY_NAME + print('Packaging assembly') + package_data_assembly(assembly, assembly_identifier=assembly.name, stimulus_set_identifier=TEXTURE_STIM_NAME, + assembly_class='PropertyAssembly', bucket_name='brainio.contrib') + + +def calculate_texture_properties(response_file, area='v1'): + textureNumOrder = np.array([327, 336, 393, 402, 13, 18, 23, 30, 38, 48, 52, 56, 60, 71, 99]) + familyNumOrder = np.array([60, 56, 13, 48, 71, 18, 327, 336, 402, 38, 23, 52, 99, 393, 30]) + textureSorted = [np.argwhere(textureNumOrder == familyNumOrder[i])[0][0] for i in range(len(familyNumOrder))] + + 
responses = response_file[area].value + # (cellNum) x (timeBin) x (rep) x (sample) x (texType) x (texFamily) + # (102+) x (300) x (20) x (15) x (2) x (15) + + responses = responses[:, :, :, :, :, textureSorted] + responses = np.moveaxis(responses, 4, 2) # texType to position 2 + responses = np.moveaxis(responses, 5, 3) # family to position 3 + responses = np.moveaxis(responses, 5, 4) # sample to position 4 + # (cellNum) x (timeBin) x (texType) x (texFamily) x (sample) x (rep) + + n_neurons, n_tb, n_type, n_fam, n_smp, n_rep = responses.shape + + responses_stabilized = np.copy(responses) # copies data for treating as stabilized spike counts + + # Calculates average responses in 10ms timebins (in spike/s) + responses = responses.reshape(n_neurons, 30, 10, n_type, n_fam, n_smp, n_rep).mean(axis=2) * 1000 + + n_tb = responses.shape[1] + sample_responses = responses.mean(axis=5) # average across repetitions + family_responses = sample_responses.mean(axis=4) # average across samples + type_responses = family_responses.mean(axis=3) # average across types + mean_responses = type_responses.mean(axis=2) # average across all + baseline = mean_responses.min(axis=1, keepdims=True) # baseline response + + latency = calculate_latency(family_responses) # gets latency using sample averages + sample_responses = calculate_mean_response(sample_responses, lat=latency) # mean response during 100ms + sample_responses = sample_responses - baseline.reshape(-1, 1, 1, 1) # subtracts baseline response and rectifies + sample_responses[sample_responses < 0] = 0 + family_responses = sample_responses.mean(axis=3) + max_response = sample_responses.reshape((n_neurons, n_type, -1)).max(axis=2) + + # Sums spikes in 10ms timebins + responses_stabilized = responses_stabilized.reshape(n_neurons, 30, 10, n_type, n_fam, n_smp, n_rep).sum(axis=2) + responses_stabilized = calculate_mean_response(responses_stabilized, lat=latency) * 10 + responses_stabilized = np.sqrt(responses_stabilized) + 
np.sqrt(responses_stabilized + 1) + responses_stabilized = responses_stabilized.mean(axis=4) + + # Maximum texture response + max_texture = max_response[:, 1].reshape((-1, 1)) + + # Maximum noise response + max_noise = max_response[:, 0].reshape((-1, 1)) + + texture_modulation_index = np.zeros((n_neurons, 1)) + texture_selectivity = np.zeros((n_neurons, 1)) + noise_selectivity = np.zeros((n_neurons, 1)) + texture_sparseness = np.zeros((n_neurons, 1)) + noise_sparseness = np.zeros((n_neurons, 1)) + variance_ratio = np.zeros((n_neurons, 1)) + sample_variance = np.zeros((n_neurons, 1)) + family_variance = np.zeros((n_neurons, 1)) + + for n in range(n_neurons): + # Texture modulation + texture_modulation_index[n] = calculate_texture_modulation(family_responses[n])[0] + # Texture selectivity (sparseness over texture families) + texture_selectivity[n] = calculate_sparseness(family_responses[n, 1]) + # Noise selectivity (sparseness over noise families) + noise_selectivity[n] = calculate_sparseness(family_responses[n, 0]) + # Texture sparseness (sparseness over texture samples) + texture_sparseness[n] = calculate_sparseness(sample_responses[n, 1]) + # Noise sparseness (sparseness over noise samples) + noise_sparseness[n] = calculate_sparseness(sample_responses[n, 0]) + variance_ratio[n], sample_variance[n], family_variance[n] = calculate_variance_ratio(responses_stabilized[n, 1]) + + absolute_texture_modulation_index = np.abs(texture_modulation_index) + + return texture_modulation_index, absolute_texture_modulation_index, texture_selectivity, noise_selectivity,\ + texture_sparseness, noise_sparseness, variance_ratio, sample_variance, family_variance, max_texture, \ + max_noise + + +def calculate_mean_response(response, lat, resp_nbin=10): + + response_shape = response.shape + n_neur = response_shape[0] + mean_response = np.zeros(np.concatenate((np.array([n_neur]), np.array(response_shape[2:])))) + + for n in range(n_neur): + mean_response[n] = response[n, lat[n]:(lat[n] + 
resp_nbin + 1)].mean(axis=0) + + return mean_response + + +def calculate_latency(response, t_pk=[7, 22], t_lat_min=2, max_d_lat=10, thrsh=0.15): + + # n_neur, n_tb, n_type, n_fam = meanByFam.shape + + response_shape = response.shape + n_neur = response_shape[0] + + lat = np.zeros(n_neur).astype(int) + for n in range(n_neur): + pref_stim = np.unravel_index(np.argmax(response[n, t_pk[0]:t_pk[1]]), response_shape[1:]) + pk_lat = pref_stim[0] + if len(pref_stim)==2: + pref_response = response[n, :, pref_stim[1]] + elif len(pref_stim)==3: + pref_response = response[n, :, pref_stim[1], pref_stim[2]] + elif len(pref_stim) == 4: + pref_response = response[n, :, pref_stim[1], pref_stim[2], pref_stim[3]] + + pref_response_pk = np.max(pref_response) + pk_lat = pk_lat + t_pk[0] + + min_lat = np.max([t_lat_min, pk_lat-max_d_lat]) + min_val = np.min(pref_response[min_lat:pk_lat]) + + lat_min = np.argmin(pref_response[min_lat:pk_lat]) + min_lat + range_val = pref_response_pk - min_val + + lat[n] = np.argwhere(pref_response[lat_min:(pk_lat + 1)] <= (min_val + thrsh * range_val))[-1][0] \ + + lat_min + 1 + return lat + + +def calculate_texture_modulation(response): + texMod_fam = (response[1, :] - response[0, :]) / (response[1, :] + response[0, :]) + texMod = np.nanmean(texMod_fam) + + return texMod, texMod_fam + + +def calculate_sparseness(response): + + response = response.reshape(-1) + n_stim = response.shape[0] + + spars = (1 - ((response.sum() / n_stim) ** 2) / ((response ** 2).sum() / n_stim)) / (1 - 1 / n_stim) + + return spars + + +def calculate_variance_ratio(response): + + residual_ms, sample_ms, family_ms = calculate_variance(response) + response_shape = response.shape + + if len(response_shape) == 3: + residual_variance = residual_ms + sample_variance = (sample_ms - residual_ms) / response_shape[2] + family_variance = (family_ms - sample_ms) / (response_shape[2]*response_shape[1]) + + else: + residual_variance = 0 + sample_variance = sample_ms + family_variance = 
(family_ms - sample_ms) / response_shape[1] + + total_variance = residual_variance + sample_variance + family_variance + + variance_ratio = (family_variance / total_variance + 0.02) / (sample_variance / total_variance + 0.02) + + return variance_ratio, sample_variance / total_variance, family_variance / total_variance + + +def calculate_variance(response): + + response_shape = response.shape + + if len(response_shape) == 3: + a, b, n = response_shape + + sample_mean = response.mean(axis=2) + family_mean = sample_mean.mean(axis=1) + all_mean = family_mean.mean() + + residual_ms = np.sum((response - sample_mean.reshape(a, b, 1)) ** 2) / (a * b * (n - 1)) + sample_ms = n * np.sum((sample_mean - family_mean.reshape(a, 1)) ** 2) / (a * (b - 1)) + family_ms = b*n*np.sum((family_mean - all_mean) ** 2) / (a - 1) + else: + a, b = response_shape + + sample_mean = response + family_mean = sample_mean.mean(axis=1) + all_mean = family_mean.mean() + + residual_ms = np.nan + sample_ms = np.sum((sample_mean - family_mean.reshape(a, 1)) ** 2) / (a * (b - 1)) + family_ms = b * np.sum((family_mean - all_mean) ** 2) / (a - 1) + + return residual_ms, sample_ms, family_ms + + +if __name__ == '__main__': + main() diff --git a/mkgu_packaging/dicarlo/marques/marques_gen_stim.py b/mkgu_packaging/dicarlo/marques/marques_gen_stim.py new file mode 100644 index 0000000..d096f80 --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_gen_stim.py @@ -0,0 +1,104 @@ +import os +import numpy as np +from mkgu_packaging.dicarlo.marques.marques_stim_common import gen_grating_stim, gen_blank_stim, gen_texture_stim, \ + load_stim_info +from brainio_collection.packaging import package_stimulus_set + +BLANK_STIM_NAME = 'dicarlo.Marques2020_blank' +RF_STIM_NAME = 'dicarlo.Marques2020_receptive_field' +ORIENTATION_STIM_NAME = 'dicarlo.Marques2020_orientation' +SF_STIM_NAME = 'dicarlo.Marques2020_spatial_frequency' +SIZE_STIM_NAME = 'dicarlo.Marques2020_size' +TEXTURE_STIM_NAME = 
'movshon.FreemanZiemba2013_properties' + +DATA_DIR = '/braintree/data2/active/users/tmarques/bs_stimuli' +DEGREES = 12 +SIZE_PX = 672 +TEXTURE_DEGREES = 8 + +## All parameters +POS = np.array([0.5]) +RADIUS = np.logspace(-3 + 0.75, 4 - 0.75, 12, endpoint=True, base=2) / 2 +SF = np.logspace(-1.5 + 0.125, 4 - 0.125, 22, endpoint=True, base=2) +ORIENTATION = np.linspace(0, 165, 12, endpoint=True) +PHASE = np.linspace(0, 315, 8, endpoint=True) +CONTRAST = [1] + +## Stimulus specific +POS_rf = np.linspace(-2.5, 2.5, 21, endpoint=True) +RADIUS_rf = [1/6] +SF_rf = [3] +ORIENTATION_rf = ORIENTATION[[0, 3, 6, 9]] +PHASE_rf = PHASE[[0, 4]] +RF_PARAMS = np.array(np.meshgrid(POS_rf, POS_rf, CONTRAST, PHASE_rf, ORIENTATION_rf, SF_rf, + RADIUS_rf)).T.reshape(-1, 7)[:,[0,1,2,6,5,4,3]] + +RADIUS_sf = np.array([0.75, 2.25]) +ORIENTATION_sf = ORIENTATION[[0, 2, 4, 6, 8, 10]] +SF_PARAMS = np.array(np.meshgrid(POS, POS, CONTRAST, PHASE, ORIENTATION_sf, SF, + RADIUS_sf)).T.reshape(-1, 7)[:,[0,1,2,6,5,4,3]] + +SF_size = SF[[4, 8, 12, 16]] +ORIENTATION_size = ORIENTATION[[0, 2, 4, 6, 8, 10]] +SIZE_PARAMS = np.array(np.meshgrid(POS, POS, CONTRAST, PHASE, ORIENTATION_size, SF_size, + RADIUS)).T.reshape(-1, 7)[:,[0,1,2,6,5,4,3]] + +SF_or = SF[[4,8,12,16]] +MULT_RADIUS_or = [2, 3, 4] +SF_RADIUS_or = np.zeros((len(SF_or)*len(MULT_RADIUS_or), 2)) +ind = 0 +for mr in MULT_RADIUS_or: + for sf in SF_or: + SF_RADIUS_or[ind] = [mr/2/sf, sf] + ind += 1 +ORIENTATION_PHASE_or = np.array(np.meshgrid(ORIENTATION, PHASE)).T.reshape(-1, 2) +ORIENTATION_PARAMS = np.zeros((len(SF_RADIUS_or) * len(ORIENTATION_PHASE_or), 7)) +ind = 0 +for sf_rad in SF_RADIUS_or: + for or_phase in ORIENTATION_PHASE_or: + ORIENTATION_PARAMS[ind] = np.concatenate((POS, POS, CONTRAST, sf_rad, or_phase)) + ind += 1 + +STIM_NAMES = [RF_STIM_NAME, ORIENTATION_STIM_NAME, SF_STIM_NAME, SIZE_STIM_NAME] + +GRAT_PARAMS = {RF_STIM_NAME: RF_PARAMS, ORIENTATION_STIM_NAME: ORIENTATION_PARAMS, SF_STIM_NAME: SF_PARAMS, + SIZE_STIM_NAME: 
SIZE_PARAMS} + +# POS_DICT = {RF_STIM_NAME: POS_rf, ORIENTATION_STIM_NAME: POS, SF_STIM_NAME: POS, SIZE_STIM_NAME: POS} +# RADIUS_DICT = {RF_STIM_NAME: np.array([1/6]), ORIENTATION_STIM_NAME: np.array([0.25, 0.5, 1]), +# SF_STIM_NAME: np.array([0.75, 2.25]), SIZE_STIM_NAME: RADIUS} +# SF_DICT = {RF_STIM_NAME: np.array([3]), ORIENTATION_STIM_NAME: np.array([1, 2, 4]), SF_STIM_NAME: SF, +# SIZE_STIM_NAME: np.array([1, 2, 4])} +# ORIENTATION_DICT = {RF_STIM_NAME: ORIENTATION[[0, 3, 6, 9]], ORIENTATION_STIM_NAME: ORIENTATION, +# SF_STIM_NAME: ORIENTATION[[0, 2, 4, 6, 8, 10]], SIZE_STIM_NAME: ORIENTATION[[0, 2, 4, 6, 8, 10]]} +# PHASE_DICT = {RF_STIM_NAME: PHASE[[0, 4]], ORIENTATION_STIM_NAME: PHASE, SF_STIM_NAME: PHASE, SIZE_STIM_NAME: PHASE} + + +def main(): + blank_dir = DATA_DIR + os.sep + BLANK_STIM_NAME + if not (os.path.isdir(blank_dir)): + gen_blank_stim(degrees=DEGREES, size_px=448, save_dir=blank_dir) + stimuli = load_stim_info(BLANK_STIM_NAME, blank_dir) + print('Packaging stimuli:' + stimuli.identifier) + package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo') + + for stim_name in STIM_NAMES: + stim_dir = DATA_DIR + os.sep + stim_name + if not (os.path.isdir(stim_dir)): + gen_grating_stim(degrees=DEGREES, size_px=SIZE_PX, stim_name=stim_name, grat_params=GRAT_PARAMS[stim_name], + save_dir=stim_dir) + stimuli = load_stim_info(stim_name, stim_dir) + print('Packaging stimuli:' + stimuli.identifier) + package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo') + + texture_dir = DATA_DIR + os.sep + TEXTURE_STIM_NAME + if not (os.path.isdir(texture_dir)): + gen_texture_stim(degrees=TEXTURE_DEGREES, stim_pos=np.array([POS[0], POS[0]]), save_dir=texture_dir) + stimuli = load_stim_info(TEXTURE_STIM_NAME, texture_dir) + print('Packaging stimuli:' + stimuli.identifier) + package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.contrib') + 
return + + +if __name__ == '__main__': + main() diff --git a/mkgu_packaging/dicarlo/marques/marques_ringach2002.py b/mkgu_packaging/dicarlo/marques/marques_ringach2002.py new file mode 100644 index 0000000..c6f5b15 --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_ringach2002.py @@ -0,0 +1,94 @@ + +import numpy as np +from brainio_collection.packaging import package_data_assembly +import scipy.io as sio +from brainio_base.assemblies import DataAssembly + +DATA_DIR = '/braintree/data2/active/users/tmarques/bs_datasets/Ringach2002.mat' +ASSEMBLY_NAME = 'shapley.Ringach2002' +ORIENTATION_STIM_NAME = 'dicarlo.Marques2020_orientation' + +PROPERTY_NAMES = ['baseline', 'max_dc', 'min_dc', 'max_ac', 'modulation_ratio', 'circular_variance', 'bandwidth', + 'orthogonal_preferred_ratio', 'orientation_selective', 'circular_variance_bandwidth_ratio', + 'orthogonal_preferred_ratio_circular_variance_difference', + 'orthogonal_preferred_ratio_bandwidth_ratio'] + + +def collect_data(data_dir): + ringach2002 = sio.loadmat(data_dir) + or_data = ringach2002['db'] + + # Response magnitudes + baseline = or_data['spont'][0, 0].T + n_neuroids = baseline.shape[0] + + max_dc = or_data['maxdc'][0, 0].T + min_dc = or_data['mindc'][0, 0].T + max_ac = or_data['maxfirst'][0, 0].T + modulation_ratio = max_ac / max_dc + + # Orientation tuning properties + circular_variance = or_data['orivar'][0, 0].T + bandwidth = or_data['bw'][0, 0].T + bandwidth[bandwidth > 90] = np.nan + orthogonal_preferred_ratio = or_data['po'][0, 0].T + + orientation_selective = np.ones((n_neuroids, 1)) + orientation_selective[np.isnan(bandwidth)] = 0 + + # Orientation tuning properties covariances + circular_variance_bandwidth_ratio = circular_variance / bandwidth + orthogonal_preferred_ratio_circular_variance_difference = orthogonal_preferred_ratio - circular_variance + orthogonal_preferred_ratio_bandwidth_ratio = orthogonal_preferred_ratio/bandwidth + + # Bins + max_dc_bins = np.logspace(0, 3, 10, base=10) + 
max_ac_bins = np.logspace(0, 3, 10, base=10) + min_dc_bins = np.logspace(-1-1/3, 2, 11, base=10) + baseline_bins = np.logspace(-1-1/3, 2, 11, base=10) + modulation_ratio_bins = np.linspace(0, 2, 11) + + circular_variance_bins = np.linspace(0, 1, num=14) + bandwidth_bins = np.linspace(0, 90, num=18) + orthogonal_preferred_ratio_bins = np.linspace(0, 1, num=14) + orientation_selective_bins = np.linspace(0, 1, num=3) + + circular_variance_bandwidth_ratio_bins = np.logspace(-3, 0, num=16, base=10) + orthogonal_preferred_ratio_bandwidth_ratio_bins = np.logspace(-3, 0, num=16, base=10) + orthogonal_preferred_ratio_circular_variance_difference_bins = np.linspace(-1, 1, num=20) + + # Create DataAssembly with single neuronal properties and bin information + assembly = np.concatenate((baseline, max_dc, min_dc, max_ac, modulation_ratio, circular_variance, bandwidth, + orthogonal_preferred_ratio, orientation_selective, + circular_variance_bandwidth_ratio, + orthogonal_preferred_ratio_circular_variance_difference, + orthogonal_preferred_ratio_bandwidth_ratio), axis=1) + + # Filters neurons with weak responses + good_neuroids = max_dc > baseline + 5 + assembly = assembly[np.argwhere(good_neuroids)[:, 0], :] + + assembly = DataAssembly(assembly, coords={'neuroid_id': ('neuroid', range(assembly.shape[0])), + 'region': ('neuroid', ['V1'] * assembly.shape[0]), + 'neuronal_property': PROPERTY_NAMES}, + dims=['neuroid', 'neuronal_property']) + + assembly.attrs['number_of_trials'] = 20 + + for p in assembly.coords['neuronal_property'].values: + assembly.attrs[p+'_bins'] = eval(p+'_bins') + + return assembly + + +def main(): + assembly = collect_data(DATA_DIR) + assembly.name = ASSEMBLY_NAME + print('Packaging assembly') + package_data_assembly(assembly, assembly_identifier=assembly.name, stimulus_set_identifier=ORIENTATION_STIM_NAME, + assembly_class='PropertyAssembly', bucket_name='brainio.contrib') + + +if __name__ == '__main__': + main() + diff --git 
a/mkgu_packaging/dicarlo/marques/marques_schiller1976c.py b/mkgu_packaging/dicarlo/marques/marques_schiller1976c.py new file mode 100644 index 0000000..d61073c --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_schiller1976c.py @@ -0,0 +1,59 @@ + +import numpy as np +from marques_utils import gen_sample +from brainio_collection.packaging import package_data_assembly +from brainio_base.assemblies import DataAssembly + +ASSEMBLY_NAME = 'schiller.Schiller1976c' +SPATIAL_FREQUENCY_STIM_NAME = 'dicarlo.Marques2020_spatial_frequency' + + +def collect_data(): + n_neurons = 87 + # Spatial frequency selective data + spatial_frequency_selective_bins = np.linspace(0, 1, num=3) + spatial_frequency_selective_hist = np.array([14, 73]) + spatial_frequency_selective = np.concatenate((np.zeros(spatial_frequency_selective_hist[0]), + np.ones(spatial_frequency_selective_hist[1]))).reshape((-1,1)) + + # Spatial frequency bandwidth data + spatial_frequency_bandwidth_bins = np.linspace(0, 100, num=11) + spatial_frequency_bandwidth_simple_hist = np.array([0, 0, 0, 6, 5, 10, 7, 2, 0, 0]) + spatial_frequency_bandwidth_complex_hist = np.array([0, 3, 4, 10, 17, 6, 2, 1, 0, 0]) + spatial_frequency_bandwidth_hist = (spatial_frequency_bandwidth_simple_hist + + spatial_frequency_bandwidth_complex_hist) + spatial_frequency_bandwidth = gen_sample(spatial_frequency_bandwidth_hist, spatial_frequency_bandwidth_bins, + scale='linear') + filler = np.zeros((n_neurons - spatial_frequency_bandwidth_hist.sum(), 1)) + filler[:] = np.nan + spatial_frequency_bandwidth = np.concatenate((filler, spatial_frequency_bandwidth)) + + # Create DataAssembly with single neuronal properties and bin information + assembly = np.concatenate((spatial_frequency_selective, spatial_frequency_bandwidth), axis=1) + + assembly = DataAssembly(assembly, coords={'neuroid_id': ('neuroid', range(assembly.shape[0])), + 'region': ('neuroid', ['V1'] * assembly.shape[0]), + 'neuronal_property': ['spatial_frequency_selective', + 
# NOTE(review): the truncated tail of collect_data() that precedes main() in
# this chunk is cut off mid-definition and is not reproduced here.


def main():
    """Package the collected property assembly and upload it to S3."""
    assembly = collect_data()
    assembly.name = ASSEMBLY_NAME
    print('Packaging assembly')
    package_data_assembly(assembly, assembly_identifier=assembly.name,
                          stimulus_set_identifier=SPATIAL_FREQUENCY_STIM_NAME, assembly_class='PropertyAssembly',
                          bucket_name='brainio.contrib')


if __name__ == '__main__':
    main()


# --------------------------------------------------------------------------
# marques_stim_common.py
# --------------------------------------------------------------------------

import numpy as np
import imageio
import os
import pandas as pd
from glob import glob

import matplotlib.pyplot as plt
from brainio_base.stimuli import StimulusSet


class Stimulus:
    """A uniform-gray image canvas onto which stimulus objects are composited.

    Pixel values are 8-bit (or `bit_depth`-bit) grayscale; objects supply a
    boolean `mask` and a `stimulus` array of the same pixel size.
    """

    def __init__(self, size_px=(448, 448), bit_depth=8,
                 stim_id=1000, save_dir='images', type_name='stimulus',
                 format_id='{0:04d}'):
        self.save_dir = save_dir
        self.stim_id = stim_id
        self.format_id = format_id
        self.type_name = type_name

        # 1-channel colors for the given bit depth: white, black, mid gray
        self.white = np.uint8(2 ** bit_depth - 1)
        self.black = np.uint8(0)
        self.gray = np.uint8(self.white / 2 + 1)
        self.size_px = size_px
        self.objects = []
        # start with a blank gray canvas
        self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray

    def add_object(self, stim_object):
        """Queue an object (with .mask and .stimulus) for compositing."""
        self.objects.append(stim_object)

    def build_stimulus(self):
        """Composite every queued object into the canvas where its mask is set."""
        for obj in self.objects:
            self.stimulus[obj.mask] = obj.stimulus[obj.mask]

    def clear_stimulus(self):
        """Reset the canvas to uniform gray."""
        # BUG FIX: original referenced non-existent `self.size`; the pixel
        # dimensions are stored in `self.size_px`.
        self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray

    def show_stimulus(self):
        """Display the canvas at native pixel resolution (no axes)."""
        my_dpi = 192
        fig = plt.figure()
        fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
        ax = plt.axes([0, 0, 1, 1])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(self.stimulus, cmap='gray')
        plt.show()

    def save_stimulus(self):
        """Write the canvas as a PNG into save_dir; return the file name."""
        file_name = self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
        imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
        return file_name


class Grating:
    """A sinusoidal grating with a circular/annular mask and optional Gaussian envelope.

    Angles (`orientation`, `phase`) are given in degrees; `sf` is spatial
    frequency in cycles/degree; `width` is the field of view in degrees;
    `rad` is the circular-mask radius in degrees; `sig` > 0 applies a
    Gaussian contrast envelope with that standard deviation (degrees).
    """

    def __init__(self, orientation=0, phase=0, sf=2, size_px=(448, 448), width=8,
                 contrast=1, bit_depth=8, pos=(0, 0), rad=5, sig=0,
                 stim_id=1000, format_id='{0:04d}', save_dir='images', type_name='grating'):
        # save directory and stimulus identification
        self.save_dir = save_dir
        self.stim_id = stim_id
        self.format_id = format_id
        self.type_name = type_name

        # 1-channel colors: white, black, gray
        self.white = np.uint8(2 ** bit_depth - 1)
        self.black = np.uint8(0)
        self.gray = np.uint8(self.white / 2 + 1)

        # pixel dimensions of the image
        self.size_px = np.array(size_px)
        # position of the grating center in the field of view (degrees, [y, x])
        self.pos = np.array(pos)
        # pixel-to-degree conversion factor
        self.px_to_deg = self.size_px[1] / width
        # stimulus size in degrees of visual field
        self.size = self.size_px / self.px_to_deg

        # orientation and phase converted from degrees to radians
        self.orientation = orientation / 180 * np.pi
        self.phase = phase / 180 * np.pi
        # spatial frequency (cycles/degree) and Michelson contrast
        self.sf = sf
        self.contrast = contrast

        # degree coordinates of every pixel (filled in by update_frame)
        self.xv = np.zeros(size_px)
        self.yv = np.zeros(size_px)
        self.update_frame()

        self.mask = np.ones(size_px, dtype=bool)
        self.set_circ_mask(rad=rad)

        self.tex = np.zeros(size_px)
        self.stimulus = np.ones(size_px, dtype=np.uint8) * self.gray

        self.envelope = np.ones(size_px)
        # BUG FIX: original tested `sig is 0` (identity comparison with an int
        # literal); equality is the correct, portable test.
        if sig == 0:
            self.update_tex()
        else:
            self.set_gaussian_envelope(sig)

    def update_frame(self):
        """Recompute the degree coordinates of all pixels (xv, yv meshgrids)."""
        x = (np.arange(self.size_px[1]) - self.size_px[1] / 2) / self.px_to_deg - self.pos[1]
        y = (np.arange(self.size_px[0]) - self.size_px[0] / 2) / self.px_to_deg - self.pos[0]
        # all possible degree coordinates as matrices of points
        self.xv, self.yv = np.meshgrid(x, y)

    def update_tex(self):
        """Recompute the grating texture in [-contrast, contrast], enveloped."""
        self.tex = (np.sin((self.xv * np.cos(self.orientation) + self.yv * np.sin(self.orientation)) *
                           self.sf * 2 * np.pi + self.phase) * self.contrast * self.envelope)

    def update_stimulus(self):
        """Render the texture into 8-bit pixels inside the mask, gray outside."""
        self.stimulus[self.mask] = np.uint8(((self.tex[self.mask] + 1) / 2) * self.white)
        self.stimulus[np.logical_not(self.mask)] = self.gray

    def set_circ_mask(self, rad):
        """Set the mask to a filled circle of radius `rad` degrees."""
        # 1 for all points inclusively within the degree radius, 0 outside
        self.mask = self.xv ** 2 + self.yv ** 2 <= rad ** 2

    def set_annular_mask(self, inner_rad, outer_rad):
        """Set the mask to an annulus between the two radii (degrees)."""
        self.mask = (self.xv ** 2 + self.yv ** 2 <= outer_rad ** 2) * \
                    (self.xv ** 2 + self.yv ** 2 > inner_rad ** 2)

    def set_gaussian_envelope(self, sig):
        """Apply a Gaussian contrast envelope with std `sig` degrees and re-render."""
        d = np.sqrt(self.xv ** 2 + self.yv ** 2)
        self.envelope = np.exp(-d ** 2 / (2 * sig ** 2))
        self.update_tex()

    def show_stimulus(self):
        """Display the rendered grating at native pixel resolution."""
        self.update_stimulus()
        my_dpi = 192
        fig = plt.figure()
        fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
        ax = plt.axes([0, 0, 1, 1])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(self.stimulus, cmap='gray')
        plt.show()

    def save_stimulus(self):
        """Render and write the grating as a PNG into save_dir; return the file name."""
        self.update_stimulus()
        file_name = self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
        imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
        return file_name


def load_stim_info(stim_name, data_dir):
    """Load the `stimulus_set` CSV in `data_dir` and build a StimulusSet.

    The CSV's last column is expected to hold local file paths and is dropped
    from the StimulusSet frame; per-image paths are exposed via `image_paths`.
    """
    stim = pd.read_csv(os.path.join(data_dir, 'stimulus_set'), dtype={'image_id': str})
    image_paths = {image_id: os.path.join(data_dir, image_name)
                   for image_id, image_name in zip(stim['image_id'].values,
                                                   stim['image_file_name'].values)}
    stim_set = StimulusSet(stim[stim.columns[:-1]])
    stim_set.image_paths = image_paths
    stim_set.identifier = stim_name
    return stim_set


def gen_blank_stim(degrees, size_px, save_dir):
    """Generate a single blank (uniform gray) stimulus plus its stimulus_set CSV."""
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)

    stim = Stimulus(size_px=[size_px, size_px], type_name='blank_stim', save_dir=save_dir, stim_id=0)

    stimuli = pd.DataFrame({'image_id': str(0), 'degrees': [degrees]})
    image_names = stim.save_stimulus()

    stimuli['image_file_name'] = pd.Series(image_names)
    stimuli['image_current_local_file_path'] = pd.Series(save_dir + os.sep + image_names)

    stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)


def gen_grating_stim(degrees, size_px, stim_name, grat_params, save_dir):
    """Generate grating stimuli from a parameter matrix and write a stimulus_set CSV.

    `grat_params` rows are [pos_y, pos_x, contrast, radius, spatial_frequency,
    orientation, phase], one row per stimulus.
    """
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)

    width = degrees
    n_stim = grat_params.shape[0]

    print('Generating stimulus: #', n_stim)

    stimuli = pd.DataFrame({'image_id': [str(n) for n in range(n_stim)], 'degrees': [width] * n_stim})

    image_names = n_stim * [None]
    image_local_file_path = n_stim * [None]
    all_y = n_stim * [None]
    all_x = n_stim * [None]
    all_c = n_stim * [None]
    all_r = n_stim * [None]
    all_s = n_stim * [None]
    all_o = n_stim * [None]
    all_p = n_stim * [None]

    for i in np.arange(n_stim):
        # Encode parameters into a numeric file id.
        # NOTE(review): contrast (column 2) is deliberately(?) excluded — two
        # rows differing only in contrast produce the same stim_id/file name
        # and would overwrite each other; confirm contrast is constant per set.
        stim_id = np.uint64(grat_params[i, 0] * 10e9 + grat_params[i, 1] * 10e7 + grat_params[i, 3] * 10e5 +
                            grat_params[i, 4] * 10e3 + grat_params[i, 5] * 10e1 + grat_params[i, 6])
        grat = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
                       rad=grat_params[i, 3], sf=grat_params[i, 4], orientation=grat_params[i, 5],
                       phase=grat_params[i, 6], stim_id=stim_id, format_id='{0:012d}', save_dir=save_dir,
                       size_px=[size_px, size_px], type_name=stim_name)
        image_names[i] = grat.save_stimulus()
        image_local_file_path[i] = save_dir + os.sep + image_names[i]
        all_y[i] = grat_params[i, 0]
        all_x[i] = grat_params[i, 1]
        all_c[i] = grat_params[i, 2]
        all_r[i] = grat_params[i, 3]
        all_s[i] = grat_params[i, 4]
        all_o[i] = grat_params[i, 5]
        all_p[i] = grat_params[i, 6]

    stimuli['position_y'] = pd.Series(all_y)
    stimuli['position_x'] = pd.Series(all_x)
    stimuli['contrast'] = pd.Series(all_c)
    stimuli['radius'] = pd.Series(all_r)
    stimuli['spatial_frequency'] = pd.Series(all_s)
    stimuli['orientation'] = pd.Series(all_o)
    stimuli['phase'] = pd.Series(all_p)
    stimuli['image_file_name'] = pd.Series(image_names)
    stimuli['image_current_local_file_path'] = pd.Series(image_local_file_path)

    stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)


def gen_grating_stim_old(degrees, size_px, stim_name, grat_contrast, grat_pos, grat_rad, grat_sf, grat_orientation,
                         grat_phase, save_dir):
    """Generate the full cartesian product of grating parameters (legacy variant).

    Kept for backward compatibility; `gen_grating_stim` takes an explicit
    parameter matrix instead of per-parameter value lists.
    """
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)

    width = degrees

    n_stim = len(grat_pos) * len(grat_pos) * len(grat_contrast) * len(grat_rad) * len(grat_sf) \
        * len(grat_orientation) * len(grat_phase)

    print('Generating stimulus: #', n_stim)

    stimuli = pd.DataFrame({'image_id': [str(n) for n in range(n_stim)], 'degrees': [width] * n_stim})

    image_names = n_stim * [None]
    image_local_file_path = n_stim * [None]
    all_y = n_stim * [None]
    all_x = n_stim * [None]
    all_c = n_stim * [None]
    all_r = n_stim * [None]
    all_s = n_stim * [None]
    all_o = n_stim * [None]
    all_p = n_stim * [None]

    i = 0
    for y in np.arange(len(grat_pos)):
        for x in np.arange(len(grat_pos)):
            for c in np.arange(len(grat_contrast)):
                for r in np.arange(len(grat_rad)):
                    for s in np.arange(len(grat_sf)):
                        for o in np.arange(len(grat_orientation)):
                            for p in np.arange(len(grat_phase)):
                                # NOTE(review): contrast index `c` is not part of
                                # the id encoding — ids collide when more than one
                                # contrast is used; confirm intended.
                                grat = Grating(width=width, pos=[grat_pos[y], grat_pos[x]],
                                               contrast=grat_contrast[c], rad=grat_rad[r],
                                               sf=grat_sf[s], orientation=grat_orientation[o],
                                               phase=grat_phase[p],
                                               stim_id=np.uint64(
                                                   y * 10e9 + x * 10e7 + r * 10e5 + s * 10e3 + o * 10e1 + p),
                                               format_id='{0:012d}', save_dir=save_dir, size_px=[size_px, size_px],
                                               type_name=stim_name)
                                image_names[i] = grat.save_stimulus()
                                image_local_file_path[i] = save_dir + os.sep + image_names[i]
                                all_y[i] = grat_pos[y]
                                all_x[i] = grat_pos[x]
                                all_c[i] = grat_contrast[c]
                                all_r[i] = grat_rad[r]
                                all_s[i] = grat_sf[s]
                                all_o[i] = grat_orientation[o]
                                all_p[i] = grat_phase[p]
                                i += 1

    stimuli['position_y'] = pd.Series(all_y)
    stimuli['position_x'] = pd.Series(all_x)
    stimuli['contrast'] = pd.Series(all_c)
    stimuli['radius'] = pd.Series(all_r)
    stimuli['spatial_frequency'] = pd.Series(all_s)
    stimuli['orientation'] = pd.Series(all_o)
    stimuli['phase'] = pd.Series(all_p)
    stimuli['image_file_name'] = pd.Series(image_names)
    stimuli['image_current_local_file_path'] = pd.Series(image_local_file_path)

    stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)


def gen_texture_stim(degrees, stim_pos, save_dir,
                     original_dir='/braintree/home/tmarques/.brainio/image_movshon_stimuli/movshon_stimuli'):
    """Re-aperture the Movshon texture/noise images and write a stimulus_set CSV.

    Each source PNG is embedded in a larger gray canvas (`degrees` wide),
    shifted to `stim_pos` (degrees, [y, x]) and windowed with a raised-cosine
    aperture. `original_dir` was previously hard-coded; it is now a
    backward-compatible keyword parameter.
    """
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)

    gray_c = 128

    input_degrees = 4
    aperture_degrees = 4

    # Sort for a deterministic image_id <-> file mapping (glob order is
    # filesystem-dependent).
    original_filenames = sorted(glob(f"{original_dir}/*.png"))

    n_stim = len(original_filenames)

    im_temp = imageio.imread(original_filenames[0])

    # Image size and pixel/degree geometry
    size_px = np.array(im_temp.shape).astype(int)
    px_deg = size_px[0] / input_degrees

    size_px_out = (size_px * (degrees / input_degrees)).astype(int)
    cnt_px = (stim_pos * px_deg).astype(int)

    size_px_disp = ((size_px_out - size_px) / 2).astype(int)

    # Destination slice of the original image inside the output canvas
    fill_ind = [[(size_px_disp[0] + cnt_px[0]), (size_px_disp[0] + cnt_px[0] + size_px[0])],
                [(size_px_disp[1] + cnt_px[1]), (size_px_disp[1] + cnt_px[1] + size_px[1])]]

    # Image aperture radius in pixels
    a = aperture_degrees * px_deg / 2
    # Meshgrid with pixel coordinates centered on the output canvas
    x = (np.arange(size_px_out[1]) - size_px_out[1] / 2)
    y = (np.arange(size_px_out[0]) - size_px_out[0] / 2)
    xv, yv = np.meshgrid(x, y)
    # Raised-cosine aperture, zero outside the circular window
    inner_mask = (xv - cnt_px[1]) ** 2 + (yv - cnt_px[0]) ** 2 < a ** 2
    cos_mask = 1 / 2 * (1 + np.cos(np.sqrt((xv - cnt_px[1]) ** 2 + (yv - cnt_px[0]) ** 2) / a * np.pi))
    cos_mask[np.logical_not(inner_mask)] = 0

    # Canonical ordering of texture-family ids -> family index 1..15
    family_num_order = np.array([60, 56, 13, 48, 71, 18, 327, 336, 402, 38, 23, 52, 99, 393, 30])

    stim_types = []  # renamed from `type` (shadowed the builtin)
    sample = []
    family = []
    position_x = []
    position_y = []
    image_names = n_stim * [None]
    image_local_file_path = n_stim * [None]

    for n in range(n_stim):
        im = imageio.imread(original_filenames[n])
        im = im - gray_c * np.ones(size_px)
        im_template = np.zeros(size_px_out)
        im_template[fill_ind[0][0]:fill_ind[0][1], fill_ind[1][0]:fill_ind[1][1]] = im
        im_masked = (im_template * cos_mask) + gray_c * np.ones(size_px_out)

        file_name = original_filenames[n].split(os.sep)[-1]

        # file names look like '<kind>-...-im<family>-smp<sample>.png'
        file_parts = file_name.split(os.sep)[-1].split('.')[0].split('-')
        if file_parts[0].find('tex') == -1:
            stim_types.append(1)  # noise sample
        else:
            stim_types.append(2)  # texture sample

        position_y.append(stim_pos[0])
        position_x.append(stim_pos[1])
        sample.append(int(file_parts[3][file_parts[3].find('smp') + 3:]))
        family_temp = int(file_parts[2][file_parts[2].find('im') + 2:])
        family.append(np.where(family_num_order == family_temp)[0][0] + 1)

        image_names[n] = 'aperture_' + file_name
        image_local_file_path[n] = save_dir + os.sep + image_names[n]

        imageio.imwrite(image_local_file_path[n], np.uint8(im_masked))

    stimuli = pd.DataFrame({'image_id': [str(n) for n in range(n_stim)], 'degrees': [degrees] * n_stim})

    stimuli['position_y'] = pd.Series(position_y)
    stimuli['position_x'] = pd.Series(position_x)
    stimuli['family'] = pd.Series(family)
    stimuli['sample'] = pd.Series(sample)
    stimuli['type'] = pd.Series(stim_types)
    stimuli['image_file_name'] = pd.Series(image_names)
    stimuli['image_current_local_file_path'] = pd.Series(image_local_file_path)

    stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)
b/mkgu_packaging/dicarlo/marques/marques_utils.py new file mode 100644 index 0000000..137b1d6 --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/marques_utils.py @@ -0,0 +1,21 @@ + +import numpy as np + + +def gen_sample(hist, bins, scale='linear'): + random_sample = np.empty([0,1]) + + if scale == 'log2': + bins = np.log2(bins) + if scale == 'log10': + bins = np.log10(bins) + + for b_ind, b in enumerate(bins[:-1]): + random_sample = np.concatenate((random_sample, (np.random.random_sample(hist[b_ind]) * + (bins[b_ind+1]-bins[b_ind]) + bins[b_ind]).reshape((-1, 1))), axis=0) + if scale == 'log2': + random_sample = 2 ** random_sample + if scale == 'log10': + random_sample = 10 ** random_sample + + return random_sample diff --git a/mkgu_packaging/dicarlo/marques/setup.py b/mkgu_packaging/dicarlo/marques/setup.py new file mode 100644 index 0000000..5732743 --- /dev/null +++ b/mkgu_packaging/dicarlo/marques/setup.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup + +requirements = [ + "pandas", + "imageio", +] + +setup( + install_requires=requirements, +)