diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9b839e3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,146 @@
+input_files/*
+Python_code/phase_processing/*
+Python_code/processed_phase/*
+Python_code/doppler_traces*/*
+Python_code/plots/*
+Python_code/outputs/*
+Python_code/logs/*
+Python_code/cache_files/*
+Python_code/networks/*
+Python_code/evaluations/*
+*.asv
+*00001
+*.index
+*.h5
+.idea/*
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+*.out
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/Python_code/CSI_doppler_computation.py b/Python_code/CSI_doppler_computation.py
new file mode 100644
index 0000000..d574a0d
--- /dev/null
+++ b/Python_code/CSI_doppler_computation.py
@@ -0,0 +1,135 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import scipy.io as sio
+import math as mt
+from scipy.fftpack import fft
+from scipy.fftpack import fftshift
+from scipy.signal.windows import hann
+import pickle
+import os
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir', help='Directory of data')
+ parser.add_argument('subdirs', help='Sub-directories')
+ parser.add_argument('dir_doppler', help='Directory to save the Doppler data')
+    parser.add_argument('start', help='Start index (number of samples to skip from the beginning)', type=int)
+    parser.add_argument('end', help='End index (number of samples to discard from the end)', type=int)
+    parser.add_argument('sample_length', help='Number of packets in a sample', type=int)
+    parser.add_argument('sliding', help='Number of packets for the sliding operation', type=int)
+ parser.add_argument('noise_level', help='Level for the noise to be removed', type=float)
+ parser.add_argument('--bandwidth', help='Bandwidth in [MHz] to select the subcarriers, can be 20, 40, 80 '
+ '(default 80)', default=80, required=False, type=int)
+ parser.add_argument('--sub_band', help='Sub_band idx in [1, 2, 3, 4] for 20 MHz, [1, 2] for 40 MHz '
+ '(default 1)', default=1, required=False, type=int)
+    parser.add_argument('--sub_sampling', help='Sampling in [1, 2, 3, 4, 5] '
+ '(default 1)', default=1, required=False, type=int)
+ args = parser.parse_args()
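+    # Example invocation (hypothetical paths and values, for illustration only):
+    #   python CSI_doppler_computation.py ./processed_phase/ S1a,S1b ./doppler_traces/ 100 100 31 1 -1.2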
+
+ sub_sampling = args.sub_sampling
+ num_symbols = args.sample_length
+ # num_symbols = mt.ceil(num_symbols / sub_sampling)
+
+ middle = int(mt.floor(num_symbols / 2))
+
+    Tc = 7.5e-3    # CSI sampling period [s]
+    fc = 5785e6    # carrier frequency [Hz]
+    v_light = 3e8  # speed of light [m/s]
+
+ sliding = args.sliding
+ noise_lev = args.noise_level
+ bandwidth = args.bandwidth
+ sub_band = args.sub_band
+
+ list_subdir = args.subdirs
+
+ for subdir in list_subdir.split(','):
+ path_doppler = args.dir_doppler + subdir
+ if not os.path.exists(path_doppler):
+ os.mkdir(path_doppler)
+
+ exp_dir = args.dir + subdir + '/'
+
+        all_files = os.listdir(exp_dir)
+        names = [file_name[:-4] for file_name in all_files]  # strip the '.mat' extension
+
+ for name in names:
+ path_doppler_name = path_doppler + '/' + name + '_bandw' + str(bandwidth) + \
+ '_RU' + str(sub_band) + \
+ '_sampling' + str(sub_sampling) + '.txt'
+ if os.path.exists(path_doppler_name):
+ continue
+
+ print(path_doppler_name)
+ name_file = exp_dir + name + '.mat'
+ mdic = sio.loadmat(name_file)
+ csi_matrix_processed = mdic['csi_matrix_processed']
+
+ csi_matrix_processed = csi_matrix_processed[args.start:-args.end, :, :]
+
+ csi_matrix_processed[:, :, 0] = csi_matrix_processed[:, :, 0] / np.mean(csi_matrix_processed[:, :, 0],
+ axis=1, keepdims=True)
+
+ csi_matrix_complete = csi_matrix_processed[:, :, 0]*np.exp(1j*csi_matrix_processed[:, :, 1])
+
+ if sub_sampling != 1:
+ csi_matrix_complete = csi_matrix_complete[0:-1:sub_sampling, :]
+
+ if bandwidth == 40:
+ if sub_band == 1:
+ selected_subcarriers_idxs = np.arange(-500, -17, 1) + 500
+ elif sub_band == 2:
+ selected_subcarriers_idxs = np.arange(17, 500, 1) + 500
+ num_selected_subcarriers = selected_subcarriers_idxs.shape[0]
+ csi_matrix_complete = csi_matrix_complete[:, selected_subcarriers_idxs]
+ elif bandwidth == 20:
+ if sub_band == 1:
+ selected_subcarriers_idxs = np.arange(-500, -259, 1) + 500
+ elif sub_band == 2:
+ selected_subcarriers_idxs = np.arange(-258, -17, 1) + 500
+ elif sub_band == 3:
+ selected_subcarriers_idxs = np.arange(17, 258, 1) + 500
+ elif sub_band == 4:
+ selected_subcarriers_idxs = np.arange(259, 500, 1) + 500
+ num_selected_subcarriers = selected_subcarriers_idxs.shape[0]
+ csi_matrix_complete = csi_matrix_complete[:, selected_subcarriers_idxs]
+
+ csi_d_profile_list = []
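+            # Sliding-window Doppler extraction: each block of num_symbols CSI samples is
+            # Hann-windowed, zero-padded to a 100-point FFT along time, and its power is
+            # summed over the subcarriers to yield one Doppler profile per window position.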
+ for i in range(0, csi_matrix_complete.shape[0]-num_symbols, sliding):
+ csi_matrix_cut = csi_matrix_complete[i:i+num_symbols, :]
+ csi_matrix_cut = np.nan_to_num(csi_matrix_cut)
+
+ hann_window = np.expand_dims(hann(num_symbols), axis=-1)
+ csi_matrix_wind = np.multiply(csi_matrix_cut, hann_window)
+ csi_doppler_prof = fft(csi_matrix_wind, n=100, axis=0)
+ csi_doppler_prof = fftshift(csi_doppler_prof, axes=0)
+
+ csi_d_map = np.abs(csi_doppler_prof * np.conj(csi_doppler_prof))
+ csi_d_map = np.sum(csi_d_map, axis=1)
+ csi_d_profile_list.append(csi_d_map)
+ csi_d_profile_array = np.asarray(csi_d_profile_list)
+ csi_d_profile_array_max = np.max(csi_d_profile_array, axis=1, keepdims=True)
+ csi_d_profile_array = csi_d_profile_array/csi_d_profile_array_max
+ csi_d_profile_array[csi_d_profile_array < mt.pow(10, noise_lev)] = mt.pow(10, noise_lev)
+
+ with open(path_doppler_name, "wb") as fp: # Pickling
+ pickle.dump(csi_d_profile_array, fp)
diff --git a/Python_code/CSI_doppler_create_datasets_cross_val.py b/Python_code/CSI_doppler_create_datasets_cross_val.py
new file mode 100644
index 0000000..d3f145d
--- /dev/null
+++ b/Python_code/CSI_doppler_create_datasets_cross_val.py
@@ -0,0 +1,165 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import glob
+import os
+import numpy as np
+import pickle
+import math as mt
+import shutil
+from dataset_utility import create_windows_antennas, convert_to_number
+from itertools import combinations, permutations
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir', help='Directory of data')
+ parser.add_argument('num_folders', help='Number of folders', type=int)
+ parser.add_argument('num_folders_train', help='Number of folders for training', type=int)
+ parser.add_argument('num_folders_val', help='Number of folders for validation', type=int)
+ parser.add_argument('window_length', help='Number of samples per window', type=int)
+ parser.add_argument('stride_length', help='Number of samples to stride', type=int)
+ parser.add_argument('labels_activities', help='Labels of the activities to be considered')
+ parser.add_argument('n_tot', help='Number of streams * number of antennas', type=int)
+    parser.add_argument('noise_level', help='Level for the noise to be removed (note that noise was already '
+                                            'removed when computing the Doppler traces; here you can only '
+                                            'remove more)', type=float)
+ parser.add_argument('--bandwidth', help='Bandwidth in [MHz] to select the subcarriers, can be 20, 40, 80 '
+ '(default 80)', default=80, required=False, type=int)
+ parser.add_argument('--sub_band', help='Sub_band idx in [1, 2, 3, 4] for 20 MHz, [1, 2] for 40 MHz '
+ '(default 1)', default=1, required=False, type=int)
+    parser.add_argument('--sub_sampling', help='Sampling in [1, 2, 3, 4, 5] '
+ '(default 1)', default=1, required=False, type=int)
+ args = parser.parse_args()
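+    # Example invocation (hypothetical values, for illustration only):
+    #   python CSI_doppler_create_datasets_cross_val.py ./doppler_traces/ 4 2 1 340 30 E,W,R,S 4 -1.2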
+
+ bandwidth = args.bandwidth
+ sub_band = args.sub_band
+ sub_sampling = args.sub_sampling
+ noise_lev = args.noise_level
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+
+ labels_activities = args.labels_activities
+ csi_label_dict = []
+ for lab_act in labels_activities.split(','):
+ csi_label_dict.append(lab_act)
+ activities = np.asarray(labels_activities)
+
+ n_tot = args.n_tot
+ exp_dir = args.dir
+ save_dir = exp_dir + 'dataset_train_val_test/'
+ if not os.path.exists(save_dir):
+ os.mkdir(save_dir)
+ path_save = save_dir + str(activities) + '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + \
+ '_sampling' + str(sub_sampling)
+ if os.path.exists(path_save):
+ remove_files = glob.glob(path_save + '/*')
+ for f in remove_files:
+ shutil.rmtree(f)
+ else:
+ os.mkdir(path_save)
+
+ window_length = args.window_length # number of windows considered
+ stride_length = args.stride_length
+
+ num_folders = args.num_folders
+ num_folders_train = args.num_folders_train
+ num_folders_val = args.num_folders_val
+
+ names_files_folders = []
+ labels_wind_files_folders = []
+ num_wind_files_folders = []
+ for folder_idx in range(1, num_folders + 1):
+ save_dir_folder = save_dir + str(activities) + suffix + '/' + str(folder_idx) + '/'
+ if not os.path.exists(save_dir_folder):
+ os.mkdir(save_dir_folder)
+ csi_matrices = []
+ labels_tot = []
+ lengths_tot = []
+ for act in csi_label_dict:
+ name = act + str(folder_idx)
+ if act != 'E':
+ name = name + '_P1'
+ # E1, E2, E3, E4, W1_P1, W2_P1, W3_P1, W4_P1, R1_P1, R2_P1, R3_P1, R4_P1, S1_P1, S2_P1, S3_P1, S4_P1
+ csi_matrix = []
+ label = convert_to_number(act, csi_label_dict)
+ for i_ant in range(n_tot):
+ name_file = exp_dir + name + '/' + name + '_stream_' + str(i_ant) + '_bandw' + str(bandwidth) + \
+ '_RU' + str(sub_band) + '_sampling' + str(sub_sampling) + '.txt'
+ with open(name_file, "rb") as fp: # Unpickling
+ stft_sum_1 = pickle.load(fp)
+ stft_sum_1[stft_sum_1 < mt.pow(10, noise_lev)] = mt.pow(10, noise_lev)
+ stft_sum_1_mean = stft_sum_1 - np.mean(stft_sum_1, axis=0, keepdims=True)
+ csi_matrix.append(stft_sum_1_mean.T)
+ lengths_tot.append(stft_sum_1_mean.shape[0])
+ labels_tot.append(label)
+ csi_matrices.append(np.asarray(csi_matrix))
+
+ csi_matrices_wind, labels_wind = create_windows_antennas(csi_matrices, labels_tot,
+ window_length, stride_length, remove_mean=False)
+ num_windows = np.floor((np.asarray(lengths_tot) - window_length - 1) / stride_length + 1)
+ if not len(csi_matrices_wind) == np.sum(num_windows):
+ print('ERROR - shapes mismatch', len(csi_matrices_wind), np.sum(num_windows))
+
+ names_files = []
+ for ii in range(len(csi_matrices_wind)):
+ name_file = save_dir_folder + str(ii) + '.txt'
+ names_files.append(name_file)
+ with open(name_file, "wb") as fp: # Pickling
+ pickle.dump(csi_matrices_wind[ii], fp)
+ names_files_folders.append(names_files)
+ labels_wind_files_folders.append(labels_wind)
+ num_wind_files_folders.append(num_windows)
+
+ folders_idx = list(np.arange(num_folders))
+ num_elements_comb = 2
+ num_elements_permut = 2
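+    # Enumerate every train/val/test split: pick 2 folders for training (combinations),
+    # then the orderings of the remaining folders assign val and test (permutations).
+    # With num_folders = 4 this gives C(4, 2) * 2! = 12 splits.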
+ comb_train = combinations(folders_idx, num_elements_comb)
+ list_sets_name = ['train', 'val', 'test']
+ for train_set in comb_train:
+ folders_idx_val_test = set(folders_idx).difference(train_set)
+ perm_val_test = permutations(folders_idx_val_test, num_elements_permut)
+ for val_test_indices in perm_val_test:
+ val_indices = list(val_test_indices[:num_folders_val])
+ test_indices = list(val_test_indices[num_folders_val:])
+ train_indices = list(train_set)
+ list_indices_sets = [train_indices, val_indices, test_indices]
+
+ save_dir_folder = save_dir + str(activities) + suffix + '/train_' + str(np.asarray(train_indices)+1) + '_val_' \
+ + str(np.asarray(val_indices)+1) + '_test_' + str(np.asarray(test_indices)+1) + '/'
+ if not os.path.exists(save_dir_folder):
+ os.mkdir(save_dir_folder)
+
+ for set_idx in range(3):
+ files_indices = list_indices_sets[set_idx]
+ labels_files = []
+ names_files_save = []
+ num_wind_files = []
+ for files_idx in files_indices:
+ labels_files.extend(labels_wind_files_folders[files_idx])
+ names_files_save.extend(names_files_folders[files_idx])
+ num_wind_files.extend(num_wind_files_folders[files_idx])
+
+ name_labels = save_dir_folder + '/labels_' + list_sets_name[set_idx] + '_' + str(activities) + '.txt'
+ with open(name_labels, "wb") as fp: # Pickling
+ pickle.dump(labels_files, fp)
+ name_f = save_dir_folder + '/files_' + list_sets_name[set_idx] + '_' + str(activities) + '.txt'
+ with open(name_f, "wb") as fp: # Pickling
+ pickle.dump(names_files_save, fp)
+ name_f = save_dir_folder + '/num_windows_' + list_sets_name[set_idx] + '_' + str(activities) + '.txt'
+ with open(name_f, "wb") as fp: # Pickling
+ pickle.dump(num_wind_files, fp)
diff --git a/Python_code/CSI_doppler_plots_antennas.py b/Python_code/CSI_doppler_plots_antennas.py
new file mode 100644
index 0000000..1da2462
--- /dev/null
+++ b/Python_code/CSI_doppler_plots_antennas.py
@@ -0,0 +1,127 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+import math as mt
+import os
+import matplotlib.pyplot as plt
+import matplotlib.gridspec as gridspec
+from matplotlib import rcParams
+
+rcParams['font.family'] = 'serif'
+rcParams['font.serif'] = 'Times'
+rcParams['text.usetex'] = True
+rcParams['text.latex.preamble'] = r'\usepackage{newtxmath}'
+rcParams['font.size'] = 16
+
+
+def plt_fft_doppler_antennas(doppler_spectrum_list, sliding_length, delta_v, name_plot):
+ if doppler_spectrum_list:
+ fig = plt.figure()
+ gs = gridspec.GridSpec(4, 1, figure=fig)
+ step = 15
+ length_v = mt.floor(doppler_spectrum_list[0].shape[1] / 2)
+ factor_v = step * (mt.floor(length_v / step))
+ ticks_y = np.arange(length_v - factor_v, length_v + factor_v + 1, step)
+ ticks_x = np.arange(0, doppler_spectrum_list[0].shape[0], int(doppler_spectrum_list[0].shape[0]/20))
+ ax = []
+
+ for p_i in range(len(doppler_spectrum_list)):
+ ax1 = fig.add_subplot(gs[(p_i, 0)])
+ plt1 = ax1.pcolormesh(doppler_spectrum_list[p_i].T, cmap='viridis', linewidth=0, rasterized=True)
+ plt1.set_edgecolor('face')
+ cbar1 = fig.colorbar(plt1)
+ cbar1.ax.set_ylabel('power [dB]', rotation=270, labelpad=14)
+ ax1.set_ylabel(r'velocity [m/s]')
+ ax1.set_xlabel(r'time [s]')
+ ax1.set_yticks(ticks_y + 0.5)
+ ax1.set_yticklabels(np.round((ticks_y - length_v) * delta_v, 2))
+ ax1.set_xticks(ticks_x)
+            ax1.set_xticklabels(np.round(ticks_x * sliding_length * 6e-3, 2))
+ ax.append(ax1)
+
+ for axi in ax:
+ axi.label_outer()
+ fig.set_size_inches(20, 10)
+ plt.savefig(name_plot, bbox_inches='tight')
+ plt.close()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir_doppler', help='Directory of data')
+    parser.add_argument('subdirs', help='Sub-directories of data')
+    parser.add_argument('sample_length', help='Number of packets in a window', type=int)
+    parser.add_argument('sliding', help='Number of packets for the sliding operation', type=int)
+ parser.add_argument('end_plt', help='End index to plot', type=int)
+    parser.add_argument('noise_level', help='Level for the noise to be removed (note that noise was already '
+                                            'removed when computing the Doppler traces; here you can only '
+                                            'remove more)', type=float)
+ parser.add_argument('--bandwidth', help='Bandwidth in [MHz] to select the subcarriers, can be 20, 40, 80 '
+ '(default 80)', default=80, required=False, type=int)
+ parser.add_argument('--sub_band', help='Sub_band idx in [1, 2, 3, 4] for 20 MHz, [1, 2] for 40 MHz '
+ '(default 1)', default=1, required=False, type=int)
+    parser.add_argument('--sub_sampling', help='Sampling in [1, 2, 3, 4, 5] '
+ '(default 1)', default=1, required=False, type=int)
+
+ args = parser.parse_args()
+
+ bandwidth = args.bandwidth
+ sub_band = args.sub_band
+ sub_sampling = args.sub_sampling
+ noise_lev = args.noise_level
+
+ num_symbols = args.sample_length
+ middle = int(mt.floor(num_symbols / 2))
+
+ n_tot = 4
+    Tc = 7.5e-3    # CSI sampling period [s]
+    fc = 5785e6    # carrier frequency [Hz]
+    v_light = 3e8  # speed of light [m/s]
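+    # Doppler velocity resolution: one FFT bin corresponds to
+    # delta_v = v_light / (Tc * fc * num_symbols) metres per second.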
+ delta_v = round(v_light / (Tc * fc * num_symbols), 3)
+
+ sliding = args.sliding
+ list_subdir = args.subdirs
+
+ for subdir in list_subdir.split(','):
+ path_doppler = args.dir_doppler + subdir
+
+ activity = subdir[0]
+
+ csi_d_antennas = []
+ for i_ant in range(n_tot):
+ path_doppler_name = path_doppler + '/' + subdir + '_stream_' + str(i_ant) + '_bandw' + str(bandwidth) + \
+ '_RU' + str(sub_band) + '_sampling' + str(sub_sampling) + '.txt'
+
+ print(path_doppler_name)
+
+            with open(path_doppler_name, "rb") as fp:  # Unpickling
+ csi_d_profile_array = pickle.load(fp)
+ csi_d_profile_array[csi_d_profile_array < mt.pow(10, noise_lev)] = mt.pow(10, noise_lev)
+
+ csi_d_profile_array_log = 10 * np.log10(csi_d_profile_array)
+ middle = int(np.floor(csi_d_profile_array_log.shape[1] / 2))
+
+ csi_d_profile_array_log = csi_d_profile_array_log[:min(csi_d_profile_array_log.shape[0], args.end_plt), :]
+
+ csi_d_antennas.append(csi_d_profile_array_log)
+
+ name_p = './plots/csi_doppler_activity_' + subdir + '_' + activity + '_bandw' + str(bandwidth) + \
+ '_RU' + str(sub_band) + '_sampling' + str(sub_sampling) + '.png'
+
+ plt_fft_doppler_antennas(csi_d_antennas, sliding, delta_v, name_p)
diff --git a/Python_code/CSI_network.py b/Python_code/CSI_network.py
new file mode 100644
index 0000000..517ae47
--- /dev/null
+++ b/Python_code/CSI_network.py
@@ -0,0 +1,351 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+from sklearn.metrics import confusion_matrix
+import os
+from dataset_utility import create_dataset_single, expand_antennas
+from network_utility import *
+from sklearn.metrics import precision_recall_fscore_support, accuracy_score
+import shutil
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir', help='Directory of data')
+ parser.add_argument('train_folders', help='Train folders')
+ parser.add_argument('val_folders', help='Validation folders')
+ parser.add_argument('test_folders', help='Test folders')
+ parser.add_argument('feature_length', help='Length along the feature dimension (height)', type=int)
+ parser.add_argument('sample_length', help='Length along the time dimension (width)', type=int)
+ parser.add_argument('channels', help='Number of channels', type=int)
+ parser.add_argument('batch_size', help='Number of samples in a batch', type=int)
+    parser.add_argument('num_tot', help='Number of antennas * number of spatial streams', type=int)
+ parser.add_argument('name_base', help='Name base for the files')
+ parser.add_argument('activities', help='Activities to be considered')
+ parser.add_argument('--bandwidth', help='Bandwidth in [MHz] to select the subcarriers, can be 20, 40, 80 '
+ '(default 80)', default=80, required=False, type=int)
+ parser.add_argument('--sub_band', help='Sub_band idx in [1, 2, 3, 4] for 20 MHz, [1, 2] for 40 MHz '
+ '(default 1)', default=1, required=False, type=int)
+    parser.add_argument('--sub_sampling', help='Sampling in [1, 2, 3, 4, 5] '
+ '(default 1)', default=1, required=False, type=int)
+ parser.add_argument('--tensorboard', help='Enable tensorboard (default False=0)', default=0, required=False,
+ type=int)
+ args = parser.parse_args()
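+    # Example invocation (hypothetical values, for illustration only):
+    #   python CSI_network.py ./doppler_traces/dataset_train_val_test/ 1,2 3 4 100 340 1 32 4 net E,W,R,S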
+
+ gpus = tf.config.experimental.list_physical_devices('GPU')
+ print(gpus)
+
+ bandwidth = args.bandwidth
+ sub_band = args.sub_band
+ sub_sampling = args.sub_sampling
+
+ train_folders_list = args.train_folders
+ train_folders = []
+ for fold in train_folders_list.split(','):
+ train_folders.append(int(fold))
+
+ val_folders_list = args.val_folders
+ val_folders = []
+ for fold in val_folders_list.split(','):
+ val_folders.append(int(fold))
+
+ test_folders_list = args.test_folders
+ test_folders = []
+ for fold in test_folders_list.split(','):
+ test_folders.append(int(fold))
+
+ csi_act = args.activities
+ activities = []
+ for lab_act in csi_act.split(','):
+ activities.append(lab_act)
+ activities = np.asarray(activities)
+
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+ train_test_val_name = 'train_' + str(np.asarray(train_folders)) + '_val_' \
+ + str(np.asarray(val_folders)) + '_test_' + str(np.asarray(test_folders))
+ folder_name = args.dir + csi_act + suffix + '/' + train_test_val_name + '/'
+
+ name_base = args.name_base + '_' + train_test_val_name + '_' + str(csi_act) + '_' + suffix
+ list_cache_files = os.listdir('./cache_files/')
+ for file_cache in list_cache_files:
+ if file_cache.startswith(name_base):
+ os.remove('./cache_files/' + file_cache)
+
+ if args.tensorboard:
+ if os.path.exists('./logs/train/'):
+ shutil.rmtree('./logs/train/')
+ if os.path.exists('./logs/validation/'):
+ shutil.rmtree('./logs/validation/')
+
+ labels_train = []
+ all_files_train = []
+ labels_val = []
+ all_files_val = []
+ labels_test = []
+ all_files_test = []
+ sample_length = args.sample_length
+ feature_length = args.feature_length
+ channels = args.channels
+ num_antennas = args.num_tot
+ input_shape = (num_antennas, sample_length, feature_length, channels)
+ input_network = (sample_length, feature_length, channels)
+ batch_size = args.batch_size
+ output_shape = activities.shape[0]
+ labels_considered = np.arange(output_shape)
+ activities = activities[labels_considered]
+
+ # TRAIN
+ name_labels = folder_name + 'labels_train_' + str(csi_act) + '.txt'
+ with open(name_labels, "rb") as fp: # Unpickling
+ labels_train.extend(pickle.load(fp))
+ name_f = folder_name + 'files_train_' + str(csi_act) + '.txt'
+ with open(name_f, "rb") as fp: # Unpickling
+ all_files_train.extend(pickle.load(fp))
+
+ # VAL
+ name_labels = folder_name + 'labels_val_' + str(csi_act) + '.txt'
+ with open(name_labels, "rb") as fp: # Unpickling
+ labels_val.extend(pickle.load(fp))
+ name_f = folder_name + 'files_val_' + str(csi_act) + '.txt'
+ with open(name_f, "rb") as fp: # Unpickling
+ all_files_val.extend(pickle.load(fp))
+
+ # TEST
+ name_labels = folder_name + 'labels_test_' + str(csi_act) + '.txt'
+ with open(name_labels, "rb") as fp: # Unpickling
+ labels_test.extend(pickle.load(fp))
+ name_f = folder_name + 'files_test_' + str(csi_act) + '.txt'
+ with open(name_f, "rb") as fp: # Unpickling
+ all_files_test.extend(pickle.load(fp))
+
+ file_train_selected = [all_files_train[idx] for idx in range(len(labels_train)) if labels_train[idx] in
+ labels_considered]
+ labels_train_selected = [labels_train[idx] for idx in range(len(labels_train)) if labels_train[idx] in
+ labels_considered]
+
+ file_train_selected_expanded, labels_train_selected_expanded, stream_ant_train = \
+ expand_antennas(file_train_selected, labels_train_selected, num_antennas)
+
+ name_cache = './cache_files/' + name_base + '_cache_train'
+ dataset_csi_train = create_dataset_single(file_train_selected_expanded, labels_train_selected_expanded,
+ stream_ant_train, input_network, batch_size,
+ shuffle=True, cache_file=name_cache)
+
+ file_val_selected = [all_files_val[idx] for idx in range(len(labels_val)) if labels_val[idx] in
+ labels_considered]
+ labels_val_selected = [labels_val[idx] for idx in range(len(labels_val)) if labels_val[idx] in
+ labels_considered]
+
+ file_val_selected_expanded, labels_val_selected_expanded, stream_ant_val = \
+ expand_antennas(file_val_selected, labels_val_selected, num_antennas)
+
+ name_cache_val = './cache_files/' + name_base + '_cache_val'
+ dataset_csi_val = create_dataset_single(file_val_selected_expanded, labels_val_selected_expanded,
+ stream_ant_val, input_network, batch_size,
+ shuffle=False, cache_file=name_cache_val)
+
+ file_test_selected = [all_files_test[idx] for idx in range(len(labels_test)) if labels_test[idx] in
+ labels_considered]
+ labels_test_selected = [labels_test[idx] for idx in range(len(labels_test)) if labels_test[idx] in
+ labels_considered]
+
+ file_test_selected_expanded, labels_test_selected_expanded, stream_ant_test = \
+ expand_antennas(file_test_selected, labels_test_selected, num_antennas)
+
+ name_cache_test = './cache_files/' + name_base + '_cache_test'
+ dataset_csi_test = create_dataset_single(file_test_selected_expanded, labels_test_selected_expanded,
+ stream_ant_test, input_network, batch_size,
+ shuffle=False, cache_file=name_cache_test)
+
+ csi_model = csi_network_inc_res(input_network, output_shape)
+ csi_model.summary()
+
+ optimiz = tf.keras.optimizers.Adam(learning_rate=5e-4)
+
+    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+ csi_model.compile(optimizer=optimiz, loss=loss, metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
+
+ num_samples_train = len(file_train_selected_expanded)
+ num_samples_val = len(file_val_selected_expanded)
+ num_samples_test = len(file_test_selected_expanded)
+ lab, count = np.unique(labels_train_selected_expanded, return_counts=True)
+ lab_val, count_val = np.unique(labels_val_selected_expanded, return_counts=True)
+ lab_test, count_test = np.unique(labels_test_selected_expanded, return_counts=True)
+ train_steps_per_epoch = int(np.ceil(num_samples_train/batch_size))
+ val_steps_per_epoch = int(np.ceil(num_samples_val/batch_size))
+ test_steps_per_epoch = int(np.ceil(num_samples_test/batch_size))
+
+ callback_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
+
+ name_model = './networks/' + name_base + '_network.h5'
+ callback_save = tf.keras.callbacks.ModelCheckpoint(name_model, save_freq='epoch', save_best_only=True,
+ monitor='val_sparse_categorical_accuracy')
+
+ callbacks = [callback_save] # callback_stop
+ if args.tensorboard:
+ tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
+ callbacks.append(tensorboard_callback)
+
+ results = csi_model.fit(dataset_csi_train, epochs=20, steps_per_epoch=train_steps_per_epoch,
+ validation_data=dataset_csi_val, validation_steps=val_steps_per_epoch,
+ callbacks=callbacks)
+
+ csi_model.save(name_model)
+
+ csi_model = tf.keras.models.load_model(name_model)
+
+ # TRAIN
+ # train_labels_true = np.array(labels_train_selected_expanded)
+ #
+ # name_cache_train_test = './cache_files/' + name_base + '_cache_train_test'
+ # dataset_csi_train_test = create_dataset_single(file_train_selected_expanded, labels_train_selected_expanded,
+ # stream_ant_train, input_network, batch_size,
+ # shuffle=False, cache_file=name_cache_train_test, prefetch=False)
+ # train_prediction_list = csi_model.predict(dataset_csi_train_test,
+ # steps=train_steps_per_epoch)[:train_labels_true.shape[0]]
+ #
+ # train_labels_pred = np.argmax(train_prediction_list, axis=1)
+ #
+ # conf_matrix_train = confusion_matrix(train_labels_true, train_labels_pred)
+
+ # VAL
+ # val_labels_true = np.array(labels_val_selected_expanded)
+ # val_prediction_list = csi_model.predict(dataset_csi_val, steps=val_steps_per_epoch)[:val_labels_true.shape[0]]
+ #
+ # val_labels_pred = np.argmax(val_prediction_list, axis=1)
+ #
+ # conf_matrix_val = confusion_matrix(val_labels_true, val_labels_pred)
+
+ # TEST
+ print('TEST')
+ test_labels_true = np.array(labels_test_selected_expanded)
+
+ test_prediction_list = csi_model.predict(dataset_csi_test, steps=test_steps_per_epoch)[
+ :test_labels_true.shape[0]]
+
+ test_labels_pred = np.argmax(test_prediction_list, axis=1)
+
+ conf_matrix = confusion_matrix(test_labels_true, test_labels_pred)
+ print('Set labels true: ', set(test_labels_true))
+ print('Set labels pred: ', set(test_labels_pred))
+ precision, recall, fscore, _ = precision_recall_fscore_support(test_labels_true,
+ test_labels_pred,
+ labels=labels_considered)
+ accuracy = accuracy_score(test_labels_true, test_labels_pred)
+
+ # merge antennas test
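+    # Fuse the per-window predictions across antennas: majority vote over the antennas'
+    # argmax labels; on a tie (or more than two candidate labels) fall back to the label
+    # that maximises the probabilities summed over the antennas.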
+ labels_true_merge = np.array(labels_test_selected)
+ pred_max_merge = np.zeros_like(labels_test_selected)
+ for i_lab in range(len(labels_test_selected)):
+ pred_antennas = test_prediction_list[i_lab * num_antennas:(i_lab + 1) * num_antennas, :]
+ lab_merge_max = np.argmax(np.sum(pred_antennas, axis=0))
+
+ pred_max_antennas = test_labels_pred[i_lab * num_antennas:(i_lab + 1) * num_antennas]
+ lab_unique, count = np.unique(pred_max_antennas, return_counts=True)
+ lab_max_merge = -1
+ if lab_unique.shape[0] > 1:
+ count_argsort = np.flip(np.argsort(count))
+ count_sort = count[count_argsort]
+ lab_unique_sort = lab_unique[count_argsort]
+            if count_sort[0] == count_sort[1] or lab_unique.shape[0] > 2:  # tie or too many candidates: use the probability-sum decision
+ lab_max_merge = lab_merge_max
+ else:
+ lab_max_merge = lab_unique_sort[0]
+ else:
+ lab_max_merge = lab_unique[0]
+ pred_max_merge[i_lab] = lab_max_merge
+
+ conf_matrix_max_merge = confusion_matrix(labels_true_merge, pred_max_merge, labels=labels_considered)
+ precision_max_merge, recall_max_merge, fscore_max_merge, _ = \
+ precision_recall_fscore_support(labels_true_merge, pred_max_merge, labels=labels_considered)
+ accuracy_max_merge = accuracy_score(labels_true_merge, pred_max_merge)
+
+ metrics_matrix_dict = {'conf_matrix': conf_matrix,
+ 'accuracy_single': accuracy,
+ 'precision_single': precision,
+ 'recall_single': recall,
+ 'fscore_single': fscore,
+ 'conf_matrix_max_merge': conf_matrix_max_merge,
+ 'accuracy_max_merge': accuracy_max_merge,
+ 'precision_max_merge': precision_max_merge,
+ 'recall_max_merge': recall_max_merge,
+ 'fscore_max_merge': fscore_max_merge}
+
+ name_file = './outputs/test_' + name_base + '.txt'
+ with open(name_file, "wb") as fp: # Pickling
+ pickle.dump(metrics_matrix_dict, fp)
+
+ # impact of the number of antennas
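+    # Repeat the fusion for every antenna subset of size 1 to 4 and average the resulting
+    # accuracy and F1-score for each subset size.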
+ one_antenna = [[0], [1], [2], [3]]
+ two_antennas = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
+ three_antennas = [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
+ four_antennas = [[0, 1, 2, 3]]
+ seq_ant_list = [one_antenna, two_antennas, three_antennas, four_antennas]
+ average_accuracy_change_num_ant = np.zeros((num_antennas,))
+ average_fscore_change_num_ant = np.zeros((num_antennas,))
+ labels_true_merge = np.array(labels_test_selected)
+ for ant_n in range(num_antennas):
+ seq_ant = seq_ant_list[ant_n]
+ num_seq = len(seq_ant)
+ for seq_n in range(num_seq):
+ pred_max_merge = np.zeros((len(labels_test_selected),))
+ ants_selected = seq_ant[seq_n]
+ for i_lab in range(len(labels_test_selected)):
+ pred_antennas = test_prediction_list[i_lab * num_antennas:(i_lab + 1) * num_antennas, :]
+ pred_antennas = pred_antennas[ants_selected, :]
+
+ lab_merge_max = np.argmax(np.sum(pred_antennas, axis=0))
+
+ pred_max_antennas = test_labels_pred[i_lab * num_antennas:(i_lab + 1) * num_antennas]
+ pred_max_antennas = pred_max_antennas[ants_selected]
+ lab_unique, count = np.unique(pred_max_antennas, return_counts=True)
+ lab_max_merge = -1
+ if lab_unique.shape[0] > 1:
+ count_argsort = np.flip(np.argsort(count))
+ count_sort = count[count_argsort]
+ lab_unique_sort = lab_unique[count_argsort]
+                    if count_sort[0] == count_sort[1] or lab_unique.shape[0] > ant_n - 1:  # tie or too many candidates: use the probability-sum decision
+ lab_max_merge = lab_merge_max
+ else:
+ lab_max_merge = lab_unique_sort[0]
+ else:
+ lab_max_merge = lab_unique[0]
+ pred_max_merge[i_lab] = lab_max_merge
+
+ _, _, fscore_max_merge, _ = precision_recall_fscore_support(labels_true_merge, pred_max_merge,
+ labels=[0, 1, 2, 3, 4])
+ accuracy_max_merge = accuracy_score(labels_true_merge, pred_max_merge)
+
+ average_accuracy_change_num_ant[ant_n] += accuracy_max_merge
+ average_fscore_change_num_ant[ant_n] += np.mean(fscore_max_merge)
+
+ average_accuracy_change_num_ant[ant_n] = average_accuracy_change_num_ant[ant_n] / num_seq
+ average_fscore_change_num_ant[ant_n] = average_fscore_change_num_ant[ant_n] / num_seq
+
+ metrics_matrix_dict = {'average_accuracy_change_num_ant': average_accuracy_change_num_ant,
+ 'average_fscore_change_num_ant': average_fscore_change_num_ant}
+
+ name_file = './outputs/change_number_antennas_test_' + name_base + '.txt'
+ with open(name_file, "wb") as fp: # Pickling
+ pickle.dump(metrics_matrix_dict, fp)
+
+ list_cache_files = os.listdir('./cache_files/')
+ for file_cache in list_cache_files:
+ if file_cache.startswith(name_base):
+ os.remove('./cache_files/' + file_cache)
diff --git a/Python_code/CSI_network_metrics.py b/Python_code/CSI_network_metrics.py
new file mode 100644
index 0000000..e963b54
--- /dev/null
+++ b/Python_code/CSI_network_metrics.py
@@ -0,0 +1,120 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('train_folders', help='Train folders')
+ parser.add_argument('val_folders', help='Validation folders')
+ parser.add_argument('test_folders', help='Test folders')
+ parser.add_argument('activities', help='Activities to be considered')
+ parser.add_argument('name_base', help='Name base for the files')
+ parser.add_argument('--bandwidth', help='Bandwidth in [MHz] to select the subcarriers, can be 20, 40, 80 '
+ '(default 80)', default=80, required=False, type=int)
+ parser.add_argument('--sub_band', help='Sub_band idx in [1, 2, 3, 4] for 20 MHz, [1, 2] for 40 MHz '
+ '(default 1)', default=1, required=False, type=int)
+    parser.add_argument('--sub_sampling', help='Sampling in [1, 2, 3, 4, 5] '
+ '(default 1)', default=1, required=False, type=int)
+ args = parser.parse_args()
+
+ bandwidth = args.bandwidth
+ sub_band = args.sub_band
+ sub_sampling = args.sub_sampling
+
+ train_folders_list = args.train_folders
+ train_folders = []
+ for fold in train_folders_list.split(','):
+ train_folders.append(int(fold))
+
+ val_folders_list = args.val_folders
+ val_folders = []
+ for fold in val_folders_list.split(','):
+ val_folders.append(int(fold))
+
+ test_folders_list = args.test_folders
+ test_folders = []
+ for fold in test_folders_list.split(','):
+ test_folders.append(int(fold))
+
+ csi_act = args.activities
+ activities = []
+ for lab_act in csi_act.split(','):
+ activities.append(lab_act)
+ activities = np.asarray(activities)
+
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+ train_test_val_name = 'train_' + str(np.asarray(train_folders)) + '_val_' \
+ + str(np.asarray(val_folders)) + '_test_' + str(np.asarray(test_folders))
+ name_base = args.name_base + '_' + train_test_val_name + '_' + str(csi_act) + '_' + suffix
+
+ name_file = './outputs/test_' + name_base + '.txt'
+
+    with open(name_file, "rb") as fp:  # Unpickling
+ conf_matrix_dict = pickle.load(fp)
+
+ conf_matrix = conf_matrix_dict['conf_matrix']
+ confusion_matrix_normaliz_row = np.transpose(conf_matrix / np.sum(conf_matrix, axis=1).reshape(-1, 1))
+ accuracies = np.diag(confusion_matrix_normaliz_row)
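+    # Each diagonal entry of the row-normalised confusion matrix is the per-class accuracy
+    # (recall); the transpose does not affect the diagonal.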
+ accuracy = conf_matrix_dict['accuracy_single']
+ precision = conf_matrix_dict['precision_single']
+ recall = conf_matrix_dict['recall_single']
+ fscore = conf_matrix_dict['fscore_single']
+ average_prec = np.mean(precision)
+ average_rec = np.mean(recall)
+    average_f = np.mean(fscore)
+ print('single antenna - average accuracy %f, average precision %f, average recall %f, average fscore %f'
+ % (accuracy, average_prec, average_rec, average_f))
+ print('fscores - empty %f, sitting %f, walking %f, running %f'
+ % (fscore[0], fscore[1], fscore[2], fscore[3]))
+ print('average fscore %f' % (np.mean(fscore)))
+ print('accuracies - empty %f, sitting %f, walking %f, running %f'
+ % (accuracies[0], accuracies[1], accuracies[2], accuracies[3]))
+
+ conf_matrix_max_merge = conf_matrix_dict['conf_matrix_max_merge']
+ conf_matrix_max_merge_normaliz_row = conf_matrix_max_merge / np.sum(conf_matrix_max_merge, axis=1).reshape(-1, 1)
+ accuracies_max_merge = np.diag(conf_matrix_max_merge_normaliz_row)
+ accuracy_max_merge = conf_matrix_dict['accuracy_max_merge']
+ precision_max_merge = conf_matrix_dict['precision_max_merge']
+ recall_max_merge = conf_matrix_dict['recall_max_merge']
+ fscore_max_merge = conf_matrix_dict['fscore_max_merge']
+ average_max_merge_prec = np.mean(precision_max_merge)
+ average_max_merge_rec = np.mean(recall_max_merge)
+ average_max_merge_f = np.mean(fscore_max_merge)
+ print('\n-- FINAL DECISION --')
+ print('max-merge - average accuracy %f, average precision %f, average recall %f, average fscore %f'
+ % (accuracy_max_merge, average_max_merge_prec, average_max_merge_rec, average_max_merge_f))
+ print('fscores - empty %f, sitting %f, walking %f, running %f'
+ % (fscore_max_merge[0], fscore_max_merge[1], fscore_max_merge[2], fscore_max_merge[3]))
+ print('accuracies - empty %f, sitting %f, walking %f, running %f'
+ % (accuracies_max_merge[0], accuracies_max_merge[1], accuracies_max_merge[2], accuracies_max_merge[3]))
+
+ # performance assessment by changing the number of monitor antennas
+ name_file = './outputs/change_number_antennas_test_' + name_base + '.txt'
+    with open(name_file, "rb") as fp:  # Unpickling
+ metrics_matrix_dict = pickle.load(fp)
+
+ average_accuracy_change_num_ant = metrics_matrix_dict['average_accuracy_change_num_ant']
+ average_fscore_change_num_ant = metrics_matrix_dict['average_fscore_change_num_ant']
+ print('\naccuracies - one antenna %f, two antennas %f, three antennas %f, four antennas %f'
+ % (average_accuracy_change_num_ant[0], average_accuracy_change_num_ant[1], average_accuracy_change_num_ant[2],
+ average_accuracy_change_num_ant[3]))
+ print('fscores - one antenna %f, two antennas %f, three antennas %f, four antennas %f'
+ % (average_fscore_change_num_ant[0], average_fscore_change_num_ant[1], average_fscore_change_num_ant[2],
+ average_fscore_change_num_ant[3]))
diff --git a/Python_code/CSI_network_metrics_cross_val.py b/Python_code/CSI_network_metrics_cross_val.py
new file mode 100644
index 0000000..bc42cd8
--- /dev/null
+++ b/Python_code/CSI_network_metrics_cross_val.py
@@ -0,0 +1,160 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+import math
+from itertools import combinations, permutations
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('activities', help='Activities to be considered')
+ parser.add_argument('n_tot', help='Number of streams * number of antennas', type=int)
+ parser.add_argument('names_base', help='Names base for the files')
+ parser.add_argument('num_folders', help='Number of folders', type=int)
+ parser.add_argument('num_folders_train', help='Number of folders for training', type=int)
+ parser.add_argument('num_folders_val', help='Number of folders for validation', type=int)
+ parser.add_argument('--bandwidth', help='Bandwidth in [MHz] to select the subcarriers, can be 20, 40, 80 '
+ '(default 80)', default=80, required=False, type=int)
+ parser.add_argument('--sub_band', help='Sub_band idx in [1, 2, 3, 4] for 20 MHz, [1, 2] for 40 MHz '
+ '(default 1)', default=1, required=False, type=int)
+    parser.add_argument('--sub_sampling', help='Sampling in [1, 2, 3, 4, 5] '
+ '(default 1)', default=1, required=False, type=int)
+ args = parser.parse_args()
+
+ n_antennas = args.n_tot
+ num_folders = args.num_folders
+ num_folders_train = args.num_folders_train
+ num_folders_val = args.num_folders_val
+
+ bandwidth = args.bandwidth
+ sub_band = args.sub_band
+ sub_sampling = args.sub_sampling
+
+ csi_act = args.activities
+ activities = []
+ for lab_act in csi_act.split(','):
+ activities.append(lab_act)
+ activities = np.asarray(activities)
+ num_act = activities.shape[0]
+
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+
+ folders_idx = list(np.arange(num_folders))
+ num_elements_comb = 2
+ list_sets_name = ['train', 'val', 'test']
+ num_elements_permut = 2
+
+    n_combinations = math.factorial(num_folders) // (math.factorial(num_elements_comb)
+                                                     * math.factorial(num_folders - num_elements_comb))
+    n_permutations = math.factorial(num_elements_permut)
+    n_comb_perm = n_combinations * n_permutations
+
+ names_string = args.names_base
+ names_files = []
+ for nam in names_string.split(','):
+ names_files.append(nam)
+ names_files = np.asarray(names_files)
+ num_files = names_files.shape[0]
+
+ n_tot_entries = n_comb_perm*num_files
+ accuracies_cross_val = np.zeros((n_tot_entries, num_act))
+ fscores_cross_val = np.zeros((n_tot_entries, num_act))
+ avg_accuracies_cross_val_antennas = np.zeros((n_tot_entries, n_antennas))
+ avg_fscores_cross_val_antennas = np.zeros((n_tot_entries, n_antennas))
+
+ for idx_name, name_b in enumerate(names_files):
+
+ index_comb_perm = 0
+ comb_train = combinations(folders_idx, num_elements_comb)
+ for train_set in comb_train:
+ folders_idx_val_test = set(folders_idx).difference(train_set)
+ perm_val_test = permutations(folders_idx_val_test, num_elements_permut)
+ for val_test_indices in perm_val_test:
+ val_indices = list(val_test_indices[:num_folders_val])
+ test_indices = list(val_test_indices[num_folders_val:])
+ train_indices = list(train_set)
+
+ train_test_val_name = 'train_' + str(np.asarray(train_indices)+1) + '_val_' \
+ + str(np.asarray(val_indices)+1) + '_test_' + str(np.asarray(test_indices)+1)
+ name_base = name_b + '_' + train_test_val_name + '_' + str(csi_act) + '_' + suffix
+
+ name_file = './outputs/test_' + name_base + '.txt'
+
+ try:
+                    with open(name_file, "rb") as fp:  # Unpickling
+ conf_matrix_dict = pickle.load(fp)
+ except FileNotFoundError:
+ print(name_file, ' not found')
+ continue
+
+ # MERGE ANTENNAS
+ conf_matrix_max_merge = conf_matrix_dict['conf_matrix_max_merge']
+ conf_matrix_max_merge_normaliz_row = conf_matrix_max_merge / \
+ np.sum(conf_matrix_max_merge, axis=1).reshape(-1, 1)
+ accuracies_max_merge = np.diag(conf_matrix_max_merge_normaliz_row)
+ accuracy_max_merge = conf_matrix_dict['accuracy_max_merge']
+ precision_max_merge = conf_matrix_dict['precision_max_merge']
+ recall_max_merge = conf_matrix_dict['recall_max_merge']
+ fscore_max_merge = conf_matrix_dict['fscore_max_merge']
+ average_max_merge_prec = np.mean(precision_max_merge)
+ average_max_merge_rec = np.mean(recall_max_merge)
+ average_max_merge_f = np.mean(fscore_max_merge)
+ print('\n-- FINAL DECISION --')
+ print('max-merge - average accuracy %f, average precision %f, average recall %f, average fscore %f'
+ % (accuracy_max_merge, average_max_merge_prec, average_max_merge_rec, average_max_merge_f))
+ print('fscores - empty %f, sitting %f, walking %f, running %f'
+ % (fscore_max_merge[0], fscore_max_merge[1], fscore_max_merge[2], fscore_max_merge[3]))
+ print('accuracies - empty %f, sitting %f, walking %f, running %f'
+ % (accuracies_max_merge[0], accuracies_max_merge[1], accuracies_max_merge[2], accuracies_max_merge[3]))
+
+ accuracies_cross_val[idx_name*n_comb_perm + index_comb_perm, :] = accuracies_max_merge
+ fscores_cross_val[idx_name*n_comb_perm + index_comb_perm, :] = fscore_max_merge
+
+ # CHANGING THE NUMBER OF MONITOR ANTENNAS
+ name_file = './outputs/change_number_antennas_test_' + name_base + '.txt'
+                with open(name_file, "rb") as fp:  # Unpickling
+ metrics_matrix_dict = pickle.load(fp)
+
+ average_accuracy_change_num_ant = metrics_matrix_dict['average_accuracy_change_num_ant']
+ average_fscore_change_num_ant = metrics_matrix_dict['average_fscore_change_num_ant']
+ print('\naccuracies - one antenna %f, two antennas %f, three antennas %f, four antennas %f'
+ % (average_accuracy_change_num_ant[0], average_accuracy_change_num_ant[1], average_accuracy_change_num_ant[2],
+ average_accuracy_change_num_ant[3]))
+ print('fscores - one antenna %f, two antennas %f, three antennas %f, four antennas %f'
+ % (average_fscore_change_num_ant[0], average_fscore_change_num_ant[1], average_fscore_change_num_ant[2],
+ average_fscore_change_num_ant[3]))
+
+ avg_accuracies_cross_val_antennas[idx_name*n_comb_perm + index_comb_perm, :] = average_accuracy_change_num_ant
+ avg_fscores_cross_val_antennas[idx_name*n_comb_perm + index_comb_perm, :] = average_fscore_change_num_ant
+
+ index_comb_perm += 1
+
+ avg_accuracies_cross_val = np.mean(accuracies_cross_val, axis=0)
+ avg_accuracy_cross_val = np.mean(avg_accuracies_cross_val)
+
+ avg_fscores_cross_val = np.mean(fscores_cross_val, axis=0)
+ avg_fscore_cross_val = np.mean(avg_fscores_cross_val)
+
+ metrics_matrix_dict = {'accuracies_cross_val': accuracies_cross_val,
+ 'avg_accuracies_cross_val': avg_accuracies_cross_val,
+ 'fscores_cross_val': fscores_cross_val,
+ 'avg_fscores_cross_val': avg_fscores_cross_val
+ }
+
+ name_file_save = './evaluations/' + args.names_base + '_' + str(csi_act) + '_' + suffix + '.txt'
+ with open(name_file_save, "wb") as fp: # Pickling
+ pickle.dump(metrics_matrix_dict, fp)
diff --git a/Python_code/CSI_network_metrics_cross_val_plots_different_bandwidth.py b/Python_code/CSI_network_metrics_cross_val_plots_different_bandwidth.py
new file mode 100644
index 0000000..8846ac0
--- /dev/null
+++ b/Python_code/CSI_network_metrics_cross_val_plots_different_bandwidth.py
@@ -0,0 +1,213 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+from itertools import combinations, permutations
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import matplotlib.cbook as cbook
+from matplotlib.patches import Polygon
+import scipy.stats as st
+import scipy.optimize as so
+from matplotlib.ticker import MultipleLocator
+from matplotlib.ticker import FuncFormatter
+from matplotlib.lines import Line2D
+
+mpl.rcParams['font.family'] = 'serif'
+mpl.rcParams['font.serif'] = 'Palatino'
+mpl.rcParams['text.usetex'] = True
+mpl.rcParams['text.latex.preamble'] = r'\usepackage{newtxmath}'
+mpl.rcParams['font.size'] = 14
+mpl.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Accent.colors)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('activities', help='Activities to be considered')
+ parser.add_argument('names_base', help='Names base for the files')
+ args = parser.parse_args()
+
+ names_string = args.names_base
+ names_files = []
+ for nam in names_string.split(','):
+ names_files.append(nam)
+ names_files = np.asarray(names_files)
+ num_files = names_files.shape[0]
+
+ csi_act = args.activities
+ activities = []
+ for lab_act in csi_act.split(','):
+ activities.append(lab_act)
+ activities = np.asarray(activities)
+ num_act = activities.shape[0]
+
+ #################################
+ # BOX PLOT DIFFERENT BANDWIDTHS
+ #################################
+ n_entries = 7
+ band_subband = [[80, 1], [40, 1], [40, 2], [20, 1], [20, 2], [20, 3], [20, 4]]
+ band_subband_names = [r'RU1-996', r'RU1-484', r'RU2-484', r'RU1-242', r'RU2-242', r'RU3-242', r'RU4-242']
+
+ avg_accuracies_cross_val = np.zeros((n_entries, num_act))
+ avg_fscores_cross_val = np.zeros((n_entries, num_act))
+
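+    # 12 = C(4, 2) * 2! train/val/test splits per name base (assuming 4 folders, as in
+    # the cross-validation dataset creation script)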
+ num_cross_val = 12
+ avg_accuracies_activities = np.zeros((n_entries, num_cross_val*num_files))
+ avg_fscores_activities = np.zeros((n_entries, num_cross_val*num_files))
+
+ for idx, entry in enumerate(band_subband):
+ bandwidth = entry[0]
+ sub_band = entry[1]
+ sub_sampling = 1
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+
+ name_file_save = './evaluations/' + args.names_base + '_' + str(csi_act) + '_' + suffix + '.txt'
+        with open(name_file_save, "rb") as fp:  # Unpickling
+ metrics_matrix_dict = pickle.load(fp)
+
+ accuracies_cross_val = metrics_matrix_dict['accuracies_cross_val']
+ fscores_cross_val = metrics_matrix_dict['fscores_cross_val']
+
+ avg_accuracies_cross_val[idx, :] = np.mean(accuracies_cross_val, axis=0)
+ avg_accuracies_activities[idx, :] = np.mean(accuracies_cross_val, axis=1)
+
+ avg_fscores_cross_val[idx, :] = np.mean(fscores_cross_val, axis=0)
+ avg_fscores_activities[idx, :] = np.mean(fscores_cross_val, axis=1)
+
+ stats_accuracies_cross_val = []
+ stats_accuracies_activities = []
+ stats_fscores_cross_val = []
+ stats_fscores_activities = []
+ for idx in range(n_entries):
+ stats_accuracies_cross_val.append(cbook.boxplot_stats(avg_accuracies_cross_val[idx], whis=(5, 95))[0])
+ stats_accuracies_activities.append(cbook.boxplot_stats(avg_accuracies_activities[idx], whis=(5, 95))[0])
+ stats_fscores_cross_val.append(cbook.boxplot_stats(avg_fscores_cross_val[idx], whis=(5, 95))[0])
+ stats_fscores_activities.append(cbook.boxplot_stats(avg_fscores_activities[idx], whis=(5, 95))[0])
+
+ # plot accuracies
+ stats = [stats_accuracies_cross_val, stats_accuracies_activities]
+ stats_names = ['stats_accuracies_cross_val', 'stats_accuracies_activities']
+
+ for idx_st, stat in enumerate(stats):
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(9, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stat, positions=np.arange(n_entries), showfliers=False, widths=0.2)
+ plt.setp(bp['boxes'], color='black', linewidth=1.5)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ ax.set_xticklabels(band_subband_names)
+ plt.grid(which='both')
+ plt.ylim([0.3, 1])
+ plt.yticks(np.linspace(0.3, 1, 8), np.linspace(30, 100, 8, dtype=int))
+ plt.xlabel(r'resource unit')
+ plt.ylabel(r'accuracy [$\%$]')
+ name_fig = './plots/change_bw_' + stats_names[idx_st] + '.pdf'
+ plt.savefig(name_fig)
+ plt.close()
+
+ # plot fscores
+ stats = [stats_fscores_cross_val, stats_fscores_activities]
+ stats_names = ['stats_fscores_cross_val', 'stats_fscores_activities']
+
+ for idx_st, stat in enumerate(stats):
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(9, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stat, positions=np.arange(n_entries), showfliers=False, widths=0.2)
+ plt.setp(bp['boxes'], color='black', linewidth=1.5)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ ax.set_xticklabels(band_subband_names)
+ plt.grid(which='both')
+ plt.ylim([0.3, 1])
+ plt.yticks(np.linspace(0.3, 1, 8))
+ plt.xlabel(r'resource unit')
+ plt.ylabel(r'F1-score')
+ name_fig = './plots/change_bw_' + stats_names[idx_st] + '.pdf'
+ plt.savefig(name_fig)
+ plt.close()
+
+ # plot accuracy f-score together cross-val
+ stats = [stats_accuracies_activities, stats_fscores_activities]
+
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(7.2, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stats[0], positions=np.arange(n_entries) - 0.12, showfliers=False, widths=0.24,
+ manage_ticks=False)
+ plt.setp(bp['boxes'], color='black', linewidth=1)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ bp = ax.bxp(stats[1], positions=np.arange(n_entries) + 0.12, showfliers=False, widths=0.24,
+ manage_ticks=False)
+ plt.setp(bp['boxes'], color='black', linewidth=1)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C1'))
+
+ ax.set_xticks(np.arange(n_entries))
+ ax.set_xticklabels(band_subband_names)
+ plt.grid(which='both')
+ plt.ylim([0.3, 1])
+ plt.yticks(np.linspace(0.3, 1, 8))
+ plt.xlabel(r'resource unit')
+ plt.ylabel(r'metric')
+ custom_lines = [Line2D([0], [0], color='C4', linewidth=4, alpha=0.7),
+ Line2D([0], [0], color='C1', linewidth=4)]
+ plt.legend(custom_lines, [r'accuracy', r'F1-score'],
+                   ncol=1, labelspacing=0.2, columnspacing=0.5, fontsize='medium')
+ name_fig = './plots/change_bw_accuracy_fscore_activities.pdf'
+ plt.savefig(name_fig)
+ plt.close()
diff --git a/Python_code/CSI_network_metrics_cross_val_plots_different_samplings.py b/Python_code/CSI_network_metrics_cross_val_plots_different_samplings.py
new file mode 100644
index 0000000..1fd6489
--- /dev/null
+++ b/Python_code/CSI_network_metrics_cross_val_plots_different_samplings.py
@@ -0,0 +1,216 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+from itertools import combinations, permutations
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import matplotlib.cbook as cbook
+from matplotlib.patches import Polygon
+import scipy.stats as st
+import scipy.optimize as so
+from matplotlib.ticker import MultipleLocator
+from matplotlib.ticker import FuncFormatter
+from matplotlib.lines import Line2D
+
+mpl.rcParams['font.family'] = 'serif'
+mpl.rcParams['font.serif'] = 'Palatino'
+mpl.rcParams['text.usetex'] = 'true'
+mpl.rcParams['text.latex.preamble'] = r'\usepackage{newtxmath}'
+mpl.rcParams['font.size'] = 14
+mpl.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Accent.colors)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('activities', help='Activities to be considered')
+ parser.add_argument('names_base', help='Names base for the files')
+ args = parser.parse_args()
+
+ names_string = args.names_base
+ names_files = []
+ for nam in names_string.split(','):
+ names_files.append(nam)
+ names_files = np.asarray(names_files)
+ num_files = names_files.shape[0]
+
+ csi_act = args.activities
+ activities = []
+ for lab_act in csi_act.split(','):
+ activities.append(lab_act)
+ activities = np.asarray(activities)
+ num_act = activities.shape[0]
+
+ #################################
+ # BOX PLOT DIFFERENT SAMPLINGS
+ #################################
+ samplings = np.arange(1, 6)
+ n_entries = samplings.shape[0]
+
+ avg_accuracies_cross_val = np.zeros((n_entries, num_act))
+ avg_fscores_cross_val = np.zeros((n_entries, num_act))
+
+ num_cross_val = 12
+ avg_accuracies_activities = np.zeros((n_entries, num_cross_val*num_files))
+ avg_fscores_activities = np.zeros((n_entries, num_cross_val*num_files))
+
+ for idx, entry in enumerate(samplings):
+ bandwidth = 80
+ sub_band = 1
+ sub_sampling = entry
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+
+ name_file_save = './evaluations/' + args.names_base + '_' + str(csi_act) + '_' + suffix + '.txt'
+ try:
+            with open(name_file_save, "rb") as fp:  # Unpickling
+ metrics_matrix_dict = pickle.load(fp)
+ except FileNotFoundError:
+ print(name_file_save, ' not found')
+ continue
+
+ accuracies_cross_val = metrics_matrix_dict['accuracies_cross_val']
+ fscores_cross_val = metrics_matrix_dict['fscores_cross_val']
+
+ avg_accuracies_cross_val[idx, :] = np.mean(accuracies_cross_val, axis=0)
+ avg_accuracies_activities[idx, :] = np.mean(accuracies_cross_val, axis=1)
+
+ avg_fscores_cross_val[idx, :] = np.mean(fscores_cross_val, axis=0)
+ avg_fscores_activities[idx, :] = np.mean(fscores_cross_val, axis=1)
+
+ stats_accuracies_cross_val = []
+ stats_accuracies_activities = []
+ stats_fscores_cross_val = []
+ stats_fscores_activities = []
+ for idx in range(n_entries):
+ stats_accuracies_cross_val.append(cbook.boxplot_stats(avg_accuracies_cross_val[idx], whis=(5, 95))[0])
+ stats_accuracies_activities.append(cbook.boxplot_stats(avg_accuracies_activities[idx], whis=(5, 95))[0])
+ stats_fscores_cross_val.append(cbook.boxplot_stats(avg_fscores_cross_val[idx], whis=(5, 95))[0])
+ stats_fscores_activities.append(cbook.boxplot_stats(avg_fscores_activities[idx], whis=(5, 95))[0])
+
+ # plot accuracies
+ stats = [stats_accuracies_cross_val, stats_accuracies_activities]
+ stats_names = ['stats_accuracies_cross_val', 'stats_accuracies_activities']
+
+ for idx_st, stat in enumerate(stats):
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(9, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stat, positions=np.arange(n_entries), showfliers=False, widths=0.2)
+ plt.setp(bp['boxes'], color='black', linewidth=1.5)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ ax.set_xticklabels(samplings)
+ plt.grid(which='both')
+ plt.ylim([0.5, 1])
+ plt.yticks(np.linspace(0.5, 1, 6), np.linspace(50, 100, 6, dtype=int))
+ plt.xlabel(r'sampling')
+ plt.ylabel(r'accuracy [$\%$]')
+ name_fig = './plots/change_sampl_' + stats_names[idx_st] + '.pdf'
+ plt.savefig(name_fig)
+ plt.close()
+
+ # plot fscores
+ stats = [stats_fscores_cross_val, stats_fscores_activities]
+ stats_names = ['stats_fscores_cross_val', 'stats_fscores_activities']
+
+ for idx_st, stat in enumerate(stats):
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(9, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stat, positions=np.arange(n_entries), showfliers=False, widths=0.2)
+ plt.setp(bp['boxes'], color='black', linewidth=1.5)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ ax.set_xticklabels(samplings)
+ plt.grid(which='both')
+ plt.ylim([0.4, 1])
+ plt.yticks(np.linspace(0.4, 1, 7))
+ plt.xlabel(r'sampling')
+ plt.ylabel(r'F1-score')
+ name_fig = './plots/change_sampl_' + stats_names[idx_st] + '.pdf'
+ plt.savefig(name_fig)
+ plt.close()
+
+ # plot accuracy f-score together cross-val
+ stats = [stats_accuracies_activities, stats_fscores_activities]
+
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(7.2, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stats[0], positions=np.arange(n_entries) - 0.08, showfliers=False, widths=0.16,
+ manage_ticks=False)
+ plt.setp(bp['boxes'], color='black', linewidth=1)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ bp = ax.bxp(stats[1], positions=np.arange(n_entries) + 0.08, showfliers=False, widths=0.16,
+ manage_ticks=False)
+ plt.setp(bp['boxes'], color='black', linewidth=1)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C1'))
+
+ ax.set_xticks(np.arange(n_entries))
+ ax.set_xticklabels(samplings)
+ plt.grid(which='both')
+ plt.ylim([0.4, 1])
+ plt.yticks(np.linspace(0.4, 1, 7))
+ plt.xlabel(r'sampling')
+ plt.ylabel(r'metric')
+ custom_lines = [Line2D([0], [0], color='C4', linewidth=4, alpha=0.7),
+ Line2D([0], [0], color='C1', linewidth=4)]
+ plt.legend(custom_lines, [r'accuracy', r'F1-score'],
+ ncol=2, labelspacing=0.2, columnspacing=0.5, fontsize='medium', loc='upper right')
+ name_fig = './plots/change_sampl_accuracy_fscore_activities.pdf'
+ plt.savefig(name_fig)
+ plt.close()
diff --git a/Python_code/CSI_network_metrics_cross_val_plots_different_samplings_combined.py b/Python_code/CSI_network_metrics_cross_val_plots_different_samplings_combined.py
new file mode 100644
index 0000000..ff60ccf
--- /dev/null
+++ b/Python_code/CSI_network_metrics_cross_val_plots_different_samplings_combined.py
@@ -0,0 +1,243 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import pickle
+from itertools import combinations, permutations
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import matplotlib.cbook as cbook
+from matplotlib.patches import Polygon
+import scipy.stats as st
+import scipy.optimize as so
+from matplotlib.ticker import MultipleLocator
+from matplotlib.ticker import FuncFormatter
+from matplotlib.lines import Line2D
+
+mpl.rcParams['font.family'] = 'serif'
+mpl.rcParams['font.serif'] = 'Palatino'
+mpl.rcParams['text.usetex'] = 'true'
+mpl.rcParams['text.latex.preamble'] = r'\usepackage{newtxmath}'
+mpl.rcParams['font.size'] = 14
+mpl.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Accent.colors)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('activities', help='Activities to be considered')
+ parser.add_argument('names_base1', help='Names base for the files')
+ parser.add_argument('names_base2', help='Names base for the files diff subsamples')
+ args = parser.parse_args()
+
+ names_string1 = args.names_base1
+ names_string2 = args.names_base2
+ names_files1 = []
+ names_files2 = []
+ for nam in names_string1.split(','):
+ names_files1.append(nam)
+ for nam in names_string2.split(','):
+ names_files2.append(nam)
+ names_string1 = ','.join(names_files1)
+ names_string2 = ','.join(names_files2)
+ names_string = [names_string1, names_string2]
+ names_files1 = np.asarray(names_files1)
+ names_files2 = np.asarray(names_files2)
+ num_files = names_files1.shape[0]
+
+ csi_act = args.activities
+ activities = []
+ for lab_act in csi_act.split(','):
+ activities.append(lab_act)
+ activities = np.asarray(activities)
+ num_act = activities.shape[0]
+
+ #################################
+ # BOX PLOT DIFFERENT SAMPLINGS
+ #################################
+ samplings = np.arange(1, 6)
+ n_entries = samplings.shape[0]
+
+ avg_accuracies_cross_val = np.zeros((num_files, num_act))
+ avg_fscores_cross_val = np.zeros((num_files, num_act))
+
+ num_cross_val = 12
+ avg_accuracies_activities = np.zeros((num_files, num_cross_val*num_files))
+ avg_fscores_activities = np.zeros((num_files, num_cross_val*num_files))
+
+ positions1 = [0, 1, 3, 5, 7]
+ indices1 = np.arange(len(positions1))
+ positions2 = [2, 4, 6, 8]
+ indices2 = np.arange(len(positions2)) + 1
+ positions_list = [positions1, positions2]
+ indices_list = [indices1, indices2]
+ for idx_s, name in enumerate(names_string):
+ positions = positions_list[idx_s]
+ indices = indices_list[idx_s]
+ for idx_ in range(len(positions)):
+ idx_plot = positions[idx_]
+ idx = indices[idx_]
+ print(idx_plot)
+ bandwidth = 80
+ sub_band = 1
+ sub_sampling = idx + 1
+ suffix = '_bandw' + str(bandwidth) + '_RU' + str(sub_band) + '_sampling' + str(sub_sampling)
+
+ name_file_save = './evaluations/' + name + '_' + str(csi_act) + '_' + suffix + '.txt'
+ try:
+                with open(name_file_save, "rb") as fp:  # Unpickling
+ metrics_matrix_dict = pickle.load(fp)
+ except FileNotFoundError:
+ print(name_file_save, ' not found')
+ continue
+
+ accuracies_cross_val = metrics_matrix_dict['accuracies_cross_val']
+ fscores_cross_val = metrics_matrix_dict['fscores_cross_val']
+
+ avg_accuracies_cross_val[idx_plot, :] = np.mean(accuracies_cross_val, axis=0)
+ avg_accuracies_activities[idx_plot, :] = np.mean(accuracies_cross_val, axis=1)
+
+ avg_fscores_cross_val[idx_plot, :] = np.mean(fscores_cross_val, axis=0)
+ avg_fscores_activities[idx_plot, :] = np.mean(fscores_cross_val, axis=1)
+
+ stats_accuracies_cross_val = []
+ stats_accuracies_activities = []
+ stats_fscores_cross_val = []
+ stats_fscores_activities = []
+ for idx in range(num_files):
+ stats_accuracies_cross_val.append(cbook.boxplot_stats(avg_accuracies_cross_val[idx], whis=(5, 95))[0])
+ stats_accuracies_activities.append(cbook.boxplot_stats(avg_accuracies_activities[idx], whis=(5, 95))[0])
+ stats_fscores_cross_val.append(cbook.boxplot_stats(avg_fscores_cross_val[idx], whis=(5, 95))[0])
+ stats_fscores_activities.append(cbook.boxplot_stats(avg_fscores_activities[idx], whis=(5, 95))[0])
+
+ # plot accuracies
+ stats = [stats_accuracies_cross_val, stats_accuracies_activities]
+ stats_names = ['stats_accuracies_cross_val', 'stats_accuracies_activities']
+
+ labels = [r'$\left(T_c,N\right)$',
+ r'$\left(\frac{T_c}{2},N\right)$', r'$\left(\frac{T_c}{2},\frac{N}{2}\right)$',
+ r'$\left(\frac{T_c}{3},N\right)$', r'$\left(\frac{T_c}{3},\frac{N}{3}\right)$',
+ r'$\left(\frac{T_c}{4},N\right)$', r'$\left(\frac{T_c}{4},\frac{N}{4}\right)$',
+ r'$\left(\frac{T_c}{5},N\right)$', r'$\left(\frac{T_c}{5},\frac{N}{5}\right)$']
+
+ for idx_st, stat in enumerate(stats):
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(9, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stat, positions=np.arange(num_files), showfliers=False, widths=0.2)
+ plt.setp(bp['boxes'], color='black', linewidth=1.5)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ ax.set_xticklabels(labels, rotation=0)
+ plt.grid(which='both')
+ plt.ylim([0.2, 1])
+ plt.yticks(np.linspace(0.2, 1, 9), np.linspace(20, 100, 9, dtype=int))
+ plt.xlabel(r'sampling')
+ plt.ylabel(r'accuracy [$\%$]')
+ name_fig = './plots/change_sampl_' + stats_names[idx_st] + '_combined.pdf'
+ plt.savefig(name_fig)
+ plt.close()
+
+ # plot fscores
+ stats = [stats_fscores_cross_val, stats_fscores_activities]
+ stats_names = ['stats_fscores_cross_val', 'stats_fscores_activities']
+
+ for idx_st, stat in enumerate(stats):
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(9, 2.5)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stat, positions=np.arange(num_files), showfliers=False, widths=0.2)
+ plt.setp(bp['boxes'], color='black', linewidth=1.5)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ ax.set_xticklabels(labels, rotation=0)
+ plt.grid(which='both')
+ plt.ylim([0.2, 1])
+ plt.yticks(np.linspace(0.2, 1, 9))
+ plt.xlabel(r'sampling')
+ plt.ylabel(r'F1-score')
+ name_fig = './plots/change_sampl_' + stats_names[idx_st] + '_combined.pdf'
+ plt.savefig(name_fig)
+ plt.close()
+
+ # plot accuracy f-score together cross-val
+ stats = [stats_accuracies_activities, stats_fscores_activities]
+
+ fig, ax = plt.subplots(1, 1, constrained_layout=True)
+ fig.set_size_inches(7.2, 2.9)
+ # Plot boxplots from our computed statistics
+ bp = ax.bxp(stats[0], positions=np.arange(num_files) - 0.15, showfliers=False, widths=0.30,
+ manage_ticks=False)
+ plt.setp(bp['boxes'], color='black', linewidth=1)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C4', alpha=0.7))
+
+ bp = ax.bxp(stats[1], positions=np.arange(num_files) + 0.15, showfliers=False, widths=0.30,
+ manage_ticks=False)
+ plt.setp(bp['boxes'], color='black', linewidth=1)
+ plt.setp(bp['medians'], color='black', linewidth=1.5)
+ plt.setp(bp['whiskers'], color='black')
+ for box in bp['boxes']:
+ box_x = []
+ box_y = []
+ for j in range(5):
+ box_x.append(box.get_xdata()[j])
+ box_y.append(box.get_ydata()[j])
+ box_coords = np.column_stack([box_x, box_y])
+ ax.add_patch(Polygon(box_coords, facecolor='C1'))
+
+ ax.set_xticks(np.arange(num_files))
+ ax.set_xticklabels(labels, rotation=0)
+ plt.grid(which='both')
+ plt.ylim([0.2, 1])
+ plt.yticks(np.linspace(0.2, 1, 9))
+ plt.xlabel(r'sampling')
+ plt.ylabel(r'metric')
+ custom_lines = [Line2D([0], [0], color='C4', linewidth=4, alpha=0.7),
+ Line2D([0], [0], color='C1', linewidth=4)]
+ plt.legend(custom_lines, [r'accuracy', r'F1-score'],
+ ncol=2, labelspacing=0.2, columnspacing=0.5, fontsize='medium', loc='lower center')
+ name_fig = './plots/change_sampl_accuracy_fscore_activities_combined.pdf'
+ plt.savefig(name_fig)
+ plt.close()
diff --git a/Python_code/CSI_phase_sanitization_H_estimation.py b/Python_code/CSI_phase_sanitization_H_estimation.py
new file mode 100644
index 0000000..06732b0
--- /dev/null
+++ b/Python_code/CSI_phase_sanitization_H_estimation.py
@@ -0,0 +1,181 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import argparse
+from optimization_utility import *
+from os import listdir
+import pickle
+from os import path
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir', help='Directory of data')
+ parser.add_argument('all_dir', help='All the files in the directory, default no', type=int, default=0)
+ parser.add_argument('name', help='Name of experiment file')
+ parser.add_argument('nss', help='Number of spatial streams', type=int)
+ parser.add_argument('ncore', help='Number of cores', type=int)
+ parser.add_argument('start_r', help='Start processing', type=int)
+ parser.add_argument('end_r', help='End processing', type=int)
+ args = parser.parse_args()
+
+ exp_save_dir = args.dir
+ names = []
+
+ if args.all_dir:
+ all_files = listdir(exp_save_dir)
+ mat_files = []
+ for i in range(len(all_files)):
+ if all_files[i].endswith('.mat'):
+ names.append(all_files[i][:-4])
+ else:
+ names.append(args.name)
+
+ n_ss = args.nss
+ n_core = args.ncore
+ n_tot = n_ss * n_core
+
+ for name in names:
+ name_file_r = './phase_processing/r_vector_' + name + '_stream_' + str(n_tot - 1) + '.txt'
+ if path.exists(name_file_r):
+ print('Already processed')
+ continue
+
+ name_file = './phase_processing/signal_' + name + '.txt'
+        with open(name_file, "rb") as fp:  # Unpickling
+ signal_complete = pickle.load(fp)
+
+ delete_idxs = np.asarray([-512, -511, -510, -509, -508, -507, -506, -505, -504, -503, -502, -501,
+ -2, -1, 0, 1, 2,
+ 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511], dtype=int) + 512
+ # pilot_subcarriers = np.asarray([-468, -400, -334, -266, -158, -92, -24, 24, 92, 158, 226, 334, 400, 468]) + 512
+ subcarriers_space = 2
+ delta_t = 1E-7
+ delta_t_refined = 5E-9
+ range_refined_up = 2.5E-7
+ range_refined_down = 2E-7
+
+ start_r = args.start_r
+ if args.end_r != -1:
+ end_r = args.end_r
+ else:
+ end_r = signal_complete.shape[1]
+
+ F_frequency = 1024
+ delta_f = 78.125E3
+ frequency_vector_complete = np.zeros(F_frequency, )
+ F_frequency_2 = F_frequency // 2
+ for row in range(F_frequency_2):
+ freq_n = delta_f * (row - F_frequency / 2)
+ frequency_vector_complete[row] = freq_n
+ freq_p = delta_f * row
+ frequency_vector_complete[row + F_frequency_2] = freq_p
+ frequency_vector = np.delete(frequency_vector_complete, delete_idxs)
+
+ T = 1/delta_f
+ t_min = -3E-7
+ t_max = 5E-7
+
+ T_matrix, time_matrix = build_T_matrix(frequency_vector, delta_t, t_min, t_max)
+ r_length = int((t_max - t_min) / delta_t_refined)
+
+ start_subcarrier = 0
+ end_subcarrier = frequency_vector.shape[0]
+ select_subcarriers = np.arange(start_subcarrier, end_subcarrier, subcarriers_space)
+
+ # Auxiliary data for first step
+ row_T = int(T_matrix.shape[0] / subcarriers_space)
+ col_T = T_matrix.shape[1]
+ m = 2 * row_T
+ n = 2 * col_T
+ In = scipy.sparse.eye(n)
+ Im = scipy.sparse.eye(m)
+ On = scipy.sparse.csc_matrix((n, n))
+ Onm = scipy.sparse.csc_matrix((n, m))
+ P = scipy.sparse.block_diag([On, Im, On], format='csc')
+ q = np.zeros(2 * n + m)
+ A2 = scipy.sparse.hstack([In, Onm, -In])
+ A3 = scipy.sparse.hstack([In, Onm, In])
+ ones_n_matr = np.ones(n)
+ zeros_n_matr = np.zeros(n)
+ zeros_nm_matr = np.zeros(n + m)
+
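+    # Two-step delay-domain estimation: a coarse LASSO over the full delay grid
+    # (step delta_t) locates the strongest propagation path, then a refined grid
+    # (step delta_t_refined) around that delay re-estimates the complex path
+    # amplitudes used to reconstruct the sanitized CFR.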
+ for stream in range(0, 4):
+        name_file = './phase_processing/r_vector_' + name + '_stream_' + str(stream) + '.txt'
+ signal_considered = signal_complete[:, start_r:end_r, stream]
+ r_optim = np.zeros((r_length, end_r - start_r), dtype=complex)
+ Tr_matrix = np.zeros((frequency_vector_complete.shape[0], end_r - start_r), dtype=complex)
+
+ for time_step in range(end_r - start_r):
+ signal_time = signal_considered[:, time_step]
+ complex_opt_r = lasso_regression_osqp_fast(signal_time, T_matrix, select_subcarriers, row_T, col_T,
+ Im, Onm, P, q, A2, A3, ones_n_matr, zeros_n_matr,
+ zeros_nm_matr)
+
+ position_max_r = np.argmax(abs(complex_opt_r))
+ time_max_r = time_matrix[position_max_r]
+
+ T_matrix_refined, time_matrix_refined = build_T_matrix(frequency_vector, delta_t_refined,
+ max(time_max_r - range_refined_down, t_min),
+ min(time_max_r + range_refined_up, t_max))
+
+ # Auxiliary data for second step
+ col_T_refined = T_matrix_refined.shape[1]
+ n_refined = 2 * col_T_refined
+ In_refined = scipy.sparse.eye(n_refined)
+ On_refined = scipy.sparse.csc_matrix((n_refined, n_refined))
+ Onm_refined = scipy.sparse.csc_matrix((n_refined, m))
+ P_refined = scipy.sparse.block_diag([On_refined, Im, On_refined], format='csc')
+ q_refined = np.zeros(2 * n_refined + m)
+ A2_refined = scipy.sparse.hstack([In_refined, Onm_refined, -In_refined])
+ A3_refined = scipy.sparse.hstack([In_refined, Onm_refined, In_refined])
+ ones_n_matr_refined = np.ones(n_refined)
+ zeros_n_matr_refined = np.zeros(n_refined)
+ zeros_nm_matr_refined = np.zeros(n_refined + m)
+
+ complex_opt_r_refined = lasso_regression_osqp_fast(signal_time, T_matrix_refined, select_subcarriers,
+ row_T, col_T_refined, Im, Onm_refined, P_refined,
+ q_refined, A2_refined, A3_refined,
+ ones_n_matr_refined, zeros_n_matr_refined,
+ zeros_nm_matr_refined)
+
+ position_max_r_refined = np.argmax(abs(complex_opt_r_refined))
+
+ T_matrix_refined, time_matrix_refined = build_T_matrix(frequency_vector_complete, delta_t_refined,
+ max(time_max_r - range_refined_down, t_min),
+ min(time_max_r + range_refined_up, t_max))
+
+ Tr = np.multiply(T_matrix_refined, complex_opt_r_refined)
+
+ Tr_sum = np.sum(Tr, axis=1)
+
+ Trr = np.multiply(Tr, np.conj(Tr[:, position_max_r_refined:position_max_r_refined + 1]))
+ Trr_sum = np.sum(Trr, axis=1)
+
+ Tr_matrix[:, time_step] = Trr_sum
+ time_max_r = time_matrix_refined[position_max_r_refined]
+
+ start_r_opt = int((time_matrix_refined[0] - t_min)/delta_t_refined)
+ end_r_opt = start_r_opt + complex_opt_r_refined.shape[0]
+ r_optim[start_r_opt:end_r_opt, time_step] = complex_opt_r_refined
+
+ name_file_r = './phase_processing/r_vector_' + name + '_stream_' + str(stream) + '.txt'
+ with open(name_file_r, "wb") as fp: # Pickling
+ pickle.dump(r_optim, fp)
+
+ name_file_Tr = './phase_processing/Tr_vector_' + name + '_stream_' + str(stream) + '.txt'
+ with open(name_file_Tr, "wb") as fp: # Pickling
+ pickle.dump(Tr_matrix, fp)
diff --git a/Python_code/CSI_phase_sanitization_signal_preprocessing.py b/Python_code/CSI_phase_sanitization_signal_preprocessing.py
new file mode 100644
index 0000000..d552031
--- /dev/null
+++ b/Python_code/CSI_phase_sanitization_signal_preprocessing.py
@@ -0,0 +1,115 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import scipy.io as sio
+from os import listdir
+import pickle
+from os import path
+
+
+def hampel_filter(input_matrix, window_size, n_sigmas=3):
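+    """Hampel outlier filter: samples that deviate from the local median by more
+    than n_sigmas estimated standard deviations are replaced with that median;
+    the median absolute deviation is scaled by k = 1.4826 to estimate the
+    standard deviation under Gaussian noise."""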
+ n = input_matrix.shape[1]
+ new_matrix = np.zeros_like(input_matrix)
+ k = 1.4826 # scale factor for Gaussian distribution
+
+ for ti in range(n):
+ start_time = max(0, ti - window_size)
+ end_time = min(n, ti + window_size)
+ x0 = np.nanmedian(input_matrix[:, start_time:end_time], axis=1, keepdims=True)
+ s0 = k * np.nanmedian(np.abs(input_matrix[:, start_time:end_time] - x0), axis=1)
+ mask = (np.abs(input_matrix[:, ti] - x0[:, 0]) > n_sigmas * s0)
+ new_matrix[:, ti] = mask*x0[:, 0] + (1 - mask)*input_matrix[:, ti]
+
+ return new_matrix
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir', help='Directory of data')
+ parser.add_argument('all_dir', help='All the files in the directory, default no', type=int, default=0)
+ parser.add_argument('name', help='Name of experiment file')
+ parser.add_argument('nss', help='Number of spatial streams', type=int)
+ parser.add_argument('ncore', help='Number of cores', type=int)
+ parser.add_argument('nsubchannels', help='Number of subchannels', type=int)
+ parser.add_argument('start_idx', help='Idx where start processing for each stream', type=int)
+ args = parser.parse_args()
+
+ exp_dir = args.dir
+ names = []
+
+ if args.all_dir:
+ all_files = listdir(exp_dir)
+ mat_files = []
+ for i in range(len(all_files)):
+ if all_files[i].endswith('.mat'):
+ names.append(all_files[i][:-4])
+ else:
+ names.append(args.name)
+
+ for name in names:
+ name_file = './phase_processing/signal_' + name + '.txt'
+ if path.exists(name_file):
+ print('Already processed')
+ continue
+
+ csi_buff_file = exp_dir + name + ".mat"
+ csi_buff_struct = sio.loadmat(csi_buff_file)
+ csi_buff_struct = (csi_buff_struct['cores'])
+
+ npkt = csi_buff_struct.shape[1]
+ ncore = args.ncore
+ nsubchannels = args.nsubchannels
+
+ csi_buff = np.zeros((nsubchannels, npkt, ncore), dtype=complex)
+
+ matrix_idx = 0
+ for pkt_idx in range(npkt):
+ for core_idx in range(ncore):
+ inserted = True
+ try:
+ csi_buff[:, matrix_idx, core_idx] = csi_buff_struct[0][pkt_idx][0, core_idx]['nss'][0][0][0, 0]['data'][0][0]
+ except IndexError:
+ inserted = False
+ continue
+ if inserted:
+ matrix_idx += 1
+
+ csi_buff = csi_buff[:, :matrix_idx - 1, :]
+
+ csi_buff = np.fft.fftshift(csi_buff, axes=0)
+
+ delete_idxs = np.argwhere(np.sum(np.sum(csi_buff, axis=0), axis=1) == 0)[:, 0] # packets empty
+ csi_buff = np.delete(csi_buff, delete_idxs, axis=1)
+
+ delete_idxs = np.asarray([-512, -511, -510, -509, -508, -507, -506, -505, -504, -503, -502, -501,
+ -2, -1, 0, 1, 2,
+ 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511], dtype=int) + 512
+ pilot_subcarriers = np.asarray([-468, -400, -334, -266, -158, -92, -24, 24, 92, 158, 226, 334, 400, 468]) + 512
+ csi_buff = np.delete(csi_buff, delete_idxs, axis=0)
+
+ n_ss = args.nss
+ n_core = args.ncore
+ n_tot = n_ss * n_core
+
+ start = args.start_idx # 1000
+ end = csi_buff.shape[1]
+ signal_complete = csi_buff[:, start:end, :]
+
+ name_file = './phase_processing/signal_' + name + '.txt'
+ with open(name_file, "wb") as fp: # Pickling
+ pickle.dump(signal_complete, fp)
diff --git a/Python_code/CSI_phase_sanitization_signal_reconstruction.py b/Python_code/CSI_phase_sanitization_signal_reconstruction.py
new file mode 100644
index 0000000..e78bd47
--- /dev/null
+++ b/Python_code/CSI_phase_sanitization_signal_reconstruction.py
@@ -0,0 +1,119 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import argparse
+import numpy as np
+import scipy.io as sio
+from os import listdir, path
+import pickle
+import math as mt
+import os
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('dir', help='Directory of data')
+ parser.add_argument('dir_save', help='Directory to save processed data')
+ parser.add_argument('nss', help='Number of spatial streams', type=int)
+ parser.add_argument('ncore', help='Number of cores', type=int)
+ parser.add_argument('nsubchannels', help='Number of subchannels', type=int)
+ parser.add_argument('start_idx', help='Start index', type=int)
+ parser.add_argument('end_idx', help='End index from the end', type=int)
+ args = parser.parse_args()
+
+ exp_dir = args.dir
+ save_dir = args.dir_save
+ names = []
+
+ all_files = listdir(exp_dir)
+ for i in range(len(all_files)):
+ if all_files[i].startswith('Tr') and all_files[i].endswith('.txt'):
+ names.append(all_files[i][:-4])
+
+ for name in names:
+ name_f = name[10:] + '.mat'
+ stop = False
+ sub_dir_name = name_f[0:-13]
+ subdir_path = save_dir + sub_dir_name
+
+ complete_path = subdir_path + '/' + name_f
+ print(complete_path)
+ if path.isfile(complete_path):
+ stop = True
+
+ if stop:
+ print('Already processed')
+ continue
+
+ if not os.path.exists(subdir_path):
+ os.mkdir(subdir_path)
+
+ name_file_save = subdir_path + '/' + name_f
+ name_file = exp_dir + name + '.txt'
+
+ with open(name_file, "rb") as fp: # Unpickling
+ H_est = pickle.load(fp)
+
+ end_H = H_est.shape[1]
+ H_est = H_est[:, args.start_idx:end_H-args.end_idx]
+ F_frequency = args.nsubchannels
+ csi_matrix_processed = np.zeros((H_est.shape[1], F_frequency, 2))
+
+ # AMPLITUDE
+ csi_matrix_processed[:, 12:-11, 0] = np.abs(H_est[12:-11, :]).T
+
+ # PHASE
+ phase_before = np.unwrap(np.angle(H_est[12:-11, :]), axis=0)
+ phase_err_tot = np.diff(phase_before, axis=1)
+ ones_vector = np.ones((2, phase_before.shape[0]))
+ ones_vector[1, :] = np.arange(0, phase_before.shape[0])
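+    # First pass: remove residual 2*pi phase jumps between consecutive packets
+    # by shifting the tail of the current phase profile whenever its difference
+    # from the previous profile jumps by roughly +/- pi across subcarriers.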
+ for tidx in range(1, phase_before.shape[1]):
+ stop = False
+ idx_prec = -1
+ while not stop:
+ phase_err = phase_before[:, tidx] - phase_before[:, tidx - 1]
+ diff_phase_err = np.diff(phase_err)
+ idxs_invert_up = np.argwhere(diff_phase_err > 0.9 * mt.pi)[:, 0]
+ idxs_invert_down = np.argwhere(diff_phase_err < -0.9 * mt.pi)[:, 0]
+ if idxs_invert_up.shape[0] > 0:
+ idx_act = idxs_invert_up[0]
+ if idx_act == idx_prec: # to avoid a continuous jump
+ stop = True
+ else:
+ phase_before[idx_act + 1:, tidx] = phase_before[idx_act + 1:, tidx] \
+ - 2 * mt.pi
+ idx_prec = idx_act
+ elif idxs_invert_down.shape[0] > 0:
+ idx_act = idxs_invert_down[0]
+ if idx_act == idx_prec:
+ stop = True
+ else:
+ phase_before[idx_act + 1:, tidx] = phase_before[idx_act + 1:, tidx] \
+ + 2 * mt.pi
+ idx_prec = idx_act
+ else:
+ stop = True
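+    # Second pass: fit each inter-packet phase difference with an affine model
+    # a + b * (subcarrier index) (the columns of ones_vector) via least squares
+    # and subtract it, removing the time-offset slope and the constant phase
+    # offset between consecutive packets.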
+ for tidx in range(1, H_est.shape[1] - 1):
+ val_prec = phase_before[:, tidx - 1:tidx]
+ val_act = phase_before[:, tidx:tidx + 1]
+ error = val_act - val_prec
+ temp2 = np.linalg.lstsq(ones_vector.T, error)[0]
+ phase_before[:, tidx] = phase_before[:, tidx] - (np.dot(ones_vector.T, temp2)).T
+
+ csi_matrix_processed[:, 12:-11, 1] = phase_before.T
+
+ mdic = {"csi_matrix_processed": csi_matrix_processed[:, 12:-11, :]}
+ sio.savemat(name_file_save, mdic)
diff --git a/Python_code/cache_files/.placeholder b/Python_code/cache_files/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/dataset_utility.py b/Python_code/dataset_utility.py
new file mode 100644
index 0000000..fd89275
--- /dev/null
+++ b/Python_code/dataset_utility.py
@@ -0,0 +1,150 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import pickle
+import tensorflow as tf
+
+
+def convert_to_number(lab, csi_label_dict):
+ lab_num = np.argwhere(np.asarray(csi_label_dict) == lab)[0][0]
+ return lab_num
+
+
+def create_windows(csi_list, labels_list, sample_length, stride_length):
+ csi_matrix_stride = []
+ labels_stride = []
+ for i in range(len(labels_list)):
+ csi_i = csi_list[i]
+ label_i = labels_list[i]
+ len_csi = csi_i.shape[1]
+ for ii in range(0, len_csi - sample_length, stride_length):
+ csi_matrix_stride.append(csi_i[:, ii:ii+sample_length])
+ labels_stride.append(label_i)
+ return csi_matrix_stride, labels_stride
+
+
+def create_windows_antennas(csi_list, labels_list, sample_length, stride_length, remove_mean=False):
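+    """Slide a window of sample_length samples (with stride stride_length) along
+    the time axis of each CSI matrix, optionally removing the per-window mean,
+    and replicate the sequence label for every extracted window."""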
+ csi_matrix_stride = []
+ labels_stride = []
+ for i in range(len(labels_list)):
+ csi_i = csi_list[i]
+ label_i = labels_list[i]
+ len_csi = csi_i.shape[2]
+ for ii in range(0, len_csi - sample_length, stride_length):
+ csi_wind = csi_i[:, :, ii:ii + sample_length, ...]
+ if remove_mean:
+ csi_mean = np.mean(csi_wind, axis=2, keepdims=True)
+ csi_wind = csi_wind - csi_mean
+ csi_matrix_stride.append(csi_wind)
+ labels_stride.append(label_i)
+ return csi_matrix_stride, labels_stride
+
+
+def expand_antennas(file_names, labels, num_antennas):
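+    """Replicate each file name and label once per antenna and pair every copy
+    with its antenna index, so each stream can be treated as its own sample."""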
+ file_names_expanded = [item for item in file_names for _ in range(num_antennas)]
+ labels_expanded = [item for item in labels for _ in range(num_antennas)]
+ stream_ant = np.tile(np.arange(num_antennas), len(labels))
+ return file_names_expanded, labels_expanded, stream_ant
+
+
+def load_data(csi_file_t):
+ csi_file = csi_file_t
+ if isinstance(csi_file_t, (bytes, bytearray)):
+ csi_file = csi_file.decode()
+ with open(csi_file, "rb") as fp: # Unpickling
+ matrix_csi = pickle.load(fp)
+ matrix_csi = tf.transpose(matrix_csi, perm=[2, 1, 0])
+ matrix_csi = tf.cast(matrix_csi, tf.float32)
+ return matrix_csi
+
+
+def create_dataset(csi_matrix_files, labels_stride, input_shape, batch_size, shuffle, cache_file, prefetch=True,
+ repeat=True):
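+    # tf.numpy_function erases static shape information, so tf.ensure_shape
+    # restores input_shape before the cache/shuffle/repeat/batch/prefetch
+    # stages of the tf.data pipeline are applied.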
+ dataset_csi = tf.data.Dataset.from_tensor_slices((csi_matrix_files, labels_stride))
+ py_funct = lambda csi_file, label: (tf.ensure_shape(tf.numpy_function(load_data, [csi_file], tf.float32),
+ input_shape), label)
+ dataset_csi = dataset_csi.map(py_funct)
+ dataset_csi = dataset_csi.cache(cache_file)
+ if shuffle:
+ dataset_csi = dataset_csi.shuffle(len(labels_stride))
+ if repeat:
+ dataset_csi = dataset_csi.repeat()
+ dataset_csi = dataset_csi.batch(batch_size=batch_size)
+ if prefetch:
+ dataset_csi = dataset_csi.prefetch(buffer_size=1)
+ return dataset_csi
+
+
+def randomize_antennas(csi_data):
+ stream_order = np.random.permutation(csi_data.shape[2])
+ csi_data_randomized = csi_data[:, :, stream_order]
+ return csi_data_randomized
+
+
+def create_dataset_randomized_antennas(csi_matrix_files, labels_stride, input_shape, batch_size, shuffle, cache_file,
+ prefetch=True, repeat=True):
+ dataset_csi = tf.data.Dataset.from_tensor_slices((csi_matrix_files, labels_stride))
+ py_funct = lambda csi_file, label: (tf.ensure_shape(tf.numpy_function(load_data, [csi_file], tf.float32),
+ input_shape), label)
+ dataset_csi = dataset_csi.map(py_funct)
+ dataset_csi = dataset_csi.cache(cache_file)
+
+ if shuffle:
+ dataset_csi = dataset_csi.shuffle(len(labels_stride))
+ if repeat:
+ dataset_csi = dataset_csi.repeat()
+
+ randomize_funct = lambda csi_data, label: (tf.ensure_shape(tf.numpy_function(randomize_antennas, [csi_data],
+ tf.float32), input_shape), label)
+ dataset_csi = dataset_csi.map(randomize_funct)
+
+ dataset_csi = dataset_csi.batch(batch_size=batch_size)
+ if prefetch:
+ dataset_csi = dataset_csi.prefetch(buffer_size=1)
+ return dataset_csi
+
+
+def load_data_single(csi_file_t, stream_a):
+ csi_file = csi_file_t
+ if isinstance(csi_file_t, (bytes, bytearray)):
+ csi_file = csi_file.decode()
+ with open(csi_file, "rb") as fp: # Unpickling
+ matrix_csi = pickle.load(fp)
+ matrix_csi_single = matrix_csi[stream_a, ...].T
+ if len(matrix_csi_single.shape) < 3:
+ matrix_csi_single = np.expand_dims(matrix_csi_single, axis=-1)
+ matrix_csi_single = tf.cast(matrix_csi_single, tf.float32)
+ return matrix_csi_single
+
+
+def create_dataset_single(csi_matrix_files, labels_stride, stream_ant, input_shape, batch_size, shuffle, cache_file,
+ prefetch=True, repeat=True):
+ stream_ant = list(stream_ant)
+ dataset_csi = tf.data.Dataset.from_tensor_slices((csi_matrix_files, labels_stride, stream_ant))
+ py_funct = lambda csi_file, label, stream: (tf.ensure_shape(tf.numpy_function(load_data_single,
+ [csi_file, stream],
+ tf.float32), input_shape), label)
+ dataset_csi = dataset_csi.map(py_funct)
+ dataset_csi = dataset_csi.cache(cache_file)
+ if shuffle:
+ dataset_csi = dataset_csi.shuffle(len(labels_stride))
+ if repeat:
+ dataset_csi = dataset_csi.repeat()
+ dataset_csi = dataset_csi.batch(batch_size=batch_size)
+ if prefetch:
+ dataset_csi = dataset_csi.prefetch(buffer_size=1)
+ return dataset_csi
diff --git a/Python_code/doppler_traces/.placeholder b/Python_code/doppler_traces/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/evaluations/.placeholder b/Python_code/evaluations/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/logs/.placeholder b/Python_code/logs/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/network_utility.py b/Python_code/network_utility.py
new file mode 100644
index 0000000..74d7b02
--- /dev/null
+++ b/Python_code/network_utility.py
@@ -0,0 +1,54 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import tensorflow as tf
+
+
+def conv2d_bn(x_in, filters, kernel_size, strides=(1, 1), padding='same', activation='relu', bn=False, name=None):
+ x = tf.keras.layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, name=name)(x_in)
+ if bn:
+ bn_name = None if name is None else name + '_bn'
+ x = tf.keras.layers.BatchNormalization(axis=3, name=bn_name)(x)
+ if activation is not None:
+ x = tf.keras.layers.Activation(activation)(x)
+ return x
+
+
+def reduction_a_block_small(x_in, base_name):
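+    # Inception-style reduction block: three parallel branches (2x2 max-pooling,
+    # a strided 2x2 convolution, and a 1x1 -> 2x2 -> 4x4 convolution stack) are
+    # concatenated along the channel axis while height and width are halved.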
+ x1 = tf.keras.layers.MaxPool2D((2, 2), strides=(2, 2), padding='valid')(x_in)
+
+ x2 = conv2d_bn(x_in, 5, (2, 2), strides=(2, 2), padding='valid', name=base_name + 'conv2_1_res_a')
+
+ x3 = conv2d_bn(x_in, 3, (1, 1), name=base_name + 'conv3_1_res_a')
+ x3 = conv2d_bn(x3, 6, (2, 2), name=base_name + 'conv3_2_res_a')
+ x3 = conv2d_bn(x3, 9, (4, 4), strides=(2, 2), padding='same', name=base_name + 'conv3_3_res_a')
+
+ x4 = tf.keras.layers.Concatenate()([x1, x2, x3])
+ return x4
+
+
+def csi_network_inc_res(input_sh, output_sh):
+ x_input = tf.keras.Input(input_sh)
+
+ x2 = reduction_a_block_small(x_input, base_name='1st')
+
+ x3 = conv2d_bn(x2, 3, (1, 1), name='conv4')
+
+ x = tf.keras.layers.Flatten()(x3)
+ x = tf.keras.layers.Dropout(0.2)(x)
+ x = tf.keras.layers.Dense(output_sh, activation=None, name='dense2')(x)
+ model = tf.keras.Model(inputs=x_input, outputs=x, name='csi_model')
+ return model
diff --git a/Python_code/networks/.placeholder b/Python_code/networks/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/optimization_utility.py b/Python_code/optimization_utility.py
new file mode 100644
index 0000000..87bea32
--- /dev/null
+++ b/Python_code/optimization_utility.py
@@ -0,0 +1,86 @@
+
+"""
+ Copyright (C) 2023 Francesca Meneghello
+ contact: meneghello@dei.unipd.it
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import cmath as cmt
+import osqp
+import scipy
+
+
+def convert_to_complex_osqp(real_im_n):
+ len_vect = real_im_n.shape[0] // 2
+ complex_n = real_im_n[:len_vect] + 1j * real_im_n[len_vect:]
+ return complex_n
+
+
+def build_T_matrix(frequency_vector, delta_t_, t_min_, t_max_):
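+    """Build the Fourier dictionary T_matrix with entries exp(-j*2*pi*f*tau) for
+    the given frequencies and the delay grid tau = t_min_ + l*delta_t_ (returned
+    in time_matrix); T_matrix maps complex path amplitudes to a CFR."""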
+ F_frequency = frequency_vector.shape[0]
+ L_paths = int((t_max_ - t_min_) / delta_t_)
+ T_matrix = np.zeros((F_frequency, L_paths), dtype=complex)
+ time_matrix = np.zeros((L_paths,))
+ for col in range(L_paths):
+ time_col = t_min_ + delta_t_ * col
+ time_matrix[col] = time_col
+ for row in range(F_frequency):
+ freq_n = frequency_vector[row]
+ T_matrix[row, col] = cmt.exp(-1j * 2 * cmt.pi * freq_n * time_col)
+ return T_matrix, time_matrix
+
+
+def lasso_regression_osqp_fast(H_matrix_, T_matrix_, selected_subcarriers, row_T, col_T, Im, Onm, P, q, A2, A3,
+ ones_n_matr, zeros_n_matr, zeros_nm_matr):
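+    # Complex LASSO  min 0.5*||T r - H||_2^2 + lambda*||r||_1  recast as a real
+    # QP for OSQP: the variable is x = [r; e; t], with r the stacked real and
+    # imaginary parts of the path amplitudes, e = T r - H the residual (the
+    # equality rows of A), and t an auxiliary vector enforcing -t <= r <= t
+    # (rows A2 and A3) that linearizes the l1 norm; P puts a quadratic cost on
+    # e and q_new the linear cost lambda on t.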
+ # time_start = time.time()
+ T_matrix_selected = T_matrix_[selected_subcarriers, :]
+ H_matrix_selected = H_matrix_[selected_subcarriers]
+
+ T_matrix_real = np.zeros((2*row_T, 2*col_T))
+ T_matrix_real[:row_T, :col_T] = np.real(T_matrix_selected)
+ T_matrix_real[row_T:, col_T:] = np.real(T_matrix_selected)
+ T_matrix_real[row_T:, :col_T] = np.imag(T_matrix_selected)
+ T_matrix_real[:row_T, col_T:] = - np.imag(T_matrix_selected)
+
+ H_matrix_real = np.zeros((2*row_T))
+ H_matrix_real[:row_T] = np.real(H_matrix_selected)
+ H_matrix_real[row_T:] = np.imag(H_matrix_selected)
+
+ n = col_T*2
+
+ # OSQP data
+ A = scipy.sparse.vstack([scipy.sparse.hstack([T_matrix_real, -Im, Onm.T]),
+ A2,
+ A3], format='csc')
+ l = np.hstack([H_matrix_real, - np.inf * ones_n_matr, zeros_n_matr])
+ u = np.hstack([H_matrix_real, zeros_n_matr, np.inf * ones_n_matr])
+
+ # Create an OSQP object
+ prob = osqp.OSQP()
+
+ # Setup workspace
+ prob.setup(P, q, A, l, u, warm_start=True, verbose=False)
+
+ # Update linear cost
+ lambd = 1E-1
+ q_new = np.hstack([zeros_nm_matr, lambd * ones_n_matr])
+ prob.update(q=q_new)
+
+ # Solve
+ res = prob.solve()
+
+ x_out = res.x
+ x_out_cut = x_out[:n]
+
+ r_opt = convert_to_complex_osqp(x_out_cut)
+ return r_opt
diff --git a/Python_code/outputs/.placeholder b/Python_code/outputs/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/phase_processing/.placeholder b/Python_code/phase_processing/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/plots/.placeholder b/Python_code/plots/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/Python_code/processed_phase/.placeholder b/Python_code/processed_phase/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/README.md b/README.md
index 2bc8c2a..acbddb6 100644
--- a/README.md
+++ b/README.md
@@ -2,14 +2,18 @@
Algorithms for human activity recognition with a commercial IEEE 802.11ax router @ 5 GHz, 80 MHz of bandwidth.
-This repository contains the reference code for the article [''Towards Integrated Sensing and Communications in IEEE 802.11bf Wi-Fi Networks''](https://arxiv.org/abs/2212.13930). The algorithms are an adaptation to the case of 802.11ax devices of the original [''SHARP algorithm''](https://ieeexplore.ieee.org/document/9804861).
+This repository contains the reference code for the article [''Toward Integrated Sensing and Communications in IEEE 802.11bf Wi-Fi Networks''](https://arxiv.org/abs/2212.13930). The algorithms are an adaptation to the case of 802.11ax devices of the original [''SHARP algorithm''](https://ieeexplore.ieee.org/document/9804861).
If you find the project useful and you use this code, please cite our articles:
```
- @misc{meneghello2023toward,
- author = {Meneghello, Francesca and Chen, Cheng and Cordeiro, Carlos and Restuccia, Francesco},
- title = {{Towards Integrated Sensing and Communications in IEEE 802.11bf Wi-Fi Networks}},
- year = {2022}
+ @article{meneghello2023toward,
+ author = {Meneghello, Francesca and Chen, Cheng and Cordeiro, Carlos and Restuccia, Francesco},
+ journal={IEEE Communications Magazine},
+ title = {{Toward Integrated Sensing and Communications in IEEE 802.11bf Wi-Fi Networks}},
+ year = {2023},
+ volume={},
+ number={},
+ pages={}
}
```
@@ -25,6 +29,113 @@ If you find the project useful and you use this code, please cite our articles:
}
```
-## NOTE: The article [meneghello2023toward] is currently under peer review. The code and the dataset will be available after the article pubblication.
+## How to use
+Clone the repository and enter the folder with the python code:
+```bash
+cd <directory where to clone the repository>
+git clone https://github.com/francescamen/SHARPax
+cd SHARPax/Python_code
+```
+
+Download the input data from [here](https://drive.google.com/file/d/1JbWNV3fMAF-26SJfqX0EohkkrUe2SeC4/view?usp=sharing) and unzip the file.
+For convenience, you can place the files in the ```input_files/processed_files/``` folder inside this project, but the scripts work with any source folder.
+
+The dataset contains Wi-Fi channel frequency response (CFR) data collected in an IEEE 802.11ax network through [AX CSI](https://ans.unibs.it/projects/ax-csi/).
+The network consists of two ASUS RT-AX86U Wi-Fi routers operating on IEEE 802.11ax channel 157 with the OFDMA resource unit RU1-996, i.e., with a bandwidth of 80 MHz and 996 data sub-channels. The CFR is obtained for each packet collected by the receiver device while a person performs different activities, acting as an obstacle to the transmission.
+The considered movements are the following: walking (W) or running (R) around, and staying (S) in place.
+The CFR data for the empty room (E) is also provided.
+The complete description of the dataset can be found in the reference paper and in the IEEE DataPort repository.
+
+The code for SHARPax is implemented in Python and can be found in the ```Python_code``` folder of this repository. The processing scripts are described below, together with their parameters.
+
+### Phase sanitization
+The following three scripts implement the phase sanitization algorithm detailed in Section 3.1 of [meneghello2022sharp](https://ieeexplore.ieee.org/document/9804861).
+```bash
+python CSI_phase_sanitization_signal_preprocessing.py <'directory of the input data'> <'process all the files in subdirectories (1) or not (0)'> <'name of the file to process (only if 0 in the previous field)'> <'number of spatial streams'> <'number of cores'> <'number of OFDMA sub-channels including control sub-channels'> <'index where to start the processing for each stream'>
+```
+e.g., python CSI_phase_sanitization_signal_preprocessing.py ../input_files/processed_files/ 1 - 1 4 1024 0
+
+```bash
+python CSI_phase_sanitization_H_estimation.py <'directory of the input data'> <'process all the files in subdirectories (1) or not (0)'> <'name of the file to process (only if 0 in the previous field)'> <'number of spatial streams'> <'number of cores'> <'index where to start the processing for each stream'> <'index where to stop the processing for each stream'>
+```
+e.g., python CSI_phase_sanitization_H_estimation.py ../input_files/processed_files/ 0 R2_P1 1 4 0 -1
+
+```bash
+python CSI_phase_sanitization_signal_reconstruction.py <'directory of the processed data'> <'directory to save the reconstructed data'> <'number of spatial streams'> <'number of cores'> <'number of OFDMA sub-channels including control sub-channels'> <'index where to start the processing for each stream'> <'index where to stop the processing for each stream'>
+```
+e.g., python CSI_phase_sanitization_signal_reconstruction.py ./phase_processing/ ./processed_phase/ 1 4 1024 0 -1
+
+### Doppler computation
+The following script computes the Doppler spectrum as described in Section 3.2 of [meneghello2022sharp](https://ieeexplore.ieee.org/document/9804861).
+
+```bash
+python CSI_doppler_computation.py <'directory of the reconstructed data'> <'sub-directories of data'> <'directory to save the Doppler data'> <'starting index to process data'> <'end index to process data (samples from the end)'> <'number of packets in a sample'> <'number of packets for sliding operations'> <'noise level'> <--bandwidth 'bandwidth'> <--sub_band 'sub-band to consider (in {1, 2} for 40 MHz, in {1, 2, 3, 4} for 20 MHz)'> <--sub_sampling 'sub-sampling factor in {1, ..., 6}'>
+```
+e.g., python CSI_doppler_computation.py ./processed_phase/ E1,E2,E3,E4,R1_P1,R2_P1,R3_P1,R4_P1,S1_P1,S2_P1,S3_P1,S4_P1,W1_P1,W2_P1,W3_P1,W4_P1 ./doppler_traces/ 200 200 25 1 -1.5 --bandwidth 40 --sub_band 2 --sub_sampling 1
+
+Helper function to visualize the Doppler traces:
+```bash
+python CSI_doppler_plot_antennas.py <'directory of the Doppler data'> <'sub-directories of data'> <'number of packets in a sample'> <'number of packets for sliding operations'> <'end index to visualize data (samples from the end)'> <'noise level'> <--bandwidth 'bandwidth'> <--sub_band 'sub-band to consider (in {1, 2} for 40 MHz, in {1, 2, 3, 4} for 20 MHz)'> <--sub_sampling 'sub-sampling factor in {1, ..., 6}'>
+```
+e.g., python CSI_doppler_plot_antennas.py ./doppler_traces/ E1,E2,E3,E4,R1_P1,R2_P1,R3_P1,R4_P1,S1_P1,S2_P1,S3_P1,S4_P1,W1_P1,W2_P1,W3_P1,W4_P1 31 1 -1 -1.5
+
+### Dataset creation
+- Create the datasets for cross validation
+```bash
+python CSI_doppler_create_datasets_cross_val.py <'directory of the Doppler data'> <'sub-directories, comma-separated'> <'number of packets in a sample'> <'number of packets for sliding operations'> <'number of samples per window'> <'number of samples for window sliding'> <'labels of the activities to be considered'> <'number of streams * number of antennas'> <'noise level'> <--bandwidth 'bandwidth'> <--sub_band 'index of the sub-band to consider (for 20 MHz and 40 MHz)'> <--sub_sampling 'sub-sampling factor'>
+```
+ e.g., python CSI_doppler_create_datasets_cross_val.py ./doppler_traces/ 4 2 1 256 24 E,S,W,R 4 -1.5 --bandwidth 40 --sub_band 2 --sub_sampling 1
+
+### Train the learning algorithm for HAR and assess the performance (4-fold cross-validation)
+```bash
+python CSI_network.py <'directory of the datasets'> <'sub-directories for training, comma-separated'> <'sub-directories for validation, comma-separated'> <'sub-directories for test, comma-separated'> <'length along the feature dimension (height)'> <'length along the time dimension (width)'> <'number of channels'> <'number of samples in a batch'> <'number of streams * number of antennas'> <'name prefix for the files'> <'activities to be considered, comma-separated'> <--bandwidth 'bandwidth'> <--sub_band 'index of the sub-band to consider (for 20 MHz and 40 MHz)'> <--sub_sampling 'sub-sampling factor'>
+```
+e.g.,
+python CSI_network.py ./doppler_traces/dataset_train_val_test/ 1,2 3 4 100 256 1 32 4 network E,S,W,R --bandwidth 80 --sub_band 1 --sub_sampling 1
+
+- Compute and visualize the performance metrics using the output files
+```bash
+python CSI_network_metrics.py <'sub-directories for training, comma-separated'> <'sub-directories for validation, comma-separated'> <'sub-directories for test, comma-separated'> <'activities to be considered, comma-separated'> <'name prefix for the files'> <--bandwidth 'bandwidth'> <--sub_band 'index of the sub-band to consider (for 20 MHz and 40 MHz)'> <--sub_sampling 'sub-sampling factor'>
+```
+ e.g., python CSI_network_metrics.py 1,2 3 4 E,S,W,R 091122 --bandwidth 80 --sub_band 1 --sub_sampling 3
+
+- Compute and save the performance metrics using the output files for plotting
+```bash
+python CSI_network_metrics_cross_val.py <'activities to be considered, comma-separated'> <'number of streams * number of antennas'> <'names prefix of the files, comma-separated'> <'number of directories considered'> <'number of directories for training'> <'number of directories for validation'> <--bandwidth 'bandwidth'> <--sub_band 'index of the sub-band to consider (for 20 MHz and 40 MHz)'> <--sub_sampling 'sub-sampling factor'>
+```
+ e.g., python CSI_network_metrics_cross_val.py E,S,W,R 4 trial1,trial2,trial3,trial4,trial5,trial6,trial7,trial8,trial9 4 2 1 --bandwidth 80 --sub_band 1 --sub_sampling 1
+
+- Plot the performance metrics
+```bash
+python CSI_network_metrics_cross_val_plots_different_bandwidth.py <'activities to be considered, comma-separated'> <'names prefix of the files, comma-separated'>
+```
+ e.g., python CSI_network_metrics_cross_val_plots_different_bandwidth.py E,S,W,R trial1,trial2,trial3,trial4,trial5,trial6,trial7,trial8,trial9
+
+```bash
+python CSI_network_metrics_cross_val_plots_different_samplings.py <'activities to be considered, comma-separated'> <'names prefix of the files, comma-separated'>
+```
+ e.g., python CSI_network_metrics_cross_val_plots_different_samplings.py E,S,W,R trial1,trial2,trial3,trial4,trial5,trial6,trial7,trial8,trial9
+
+```bash
+python CSI_network_metrics_cross_val_plots_different_samplings_combined.py <'activities to be considered, comma-separated'> <'names prefix of the files, comma-separated'>
+```
+ e.g., python CSI_network_metrics_cross_val_plots_different_samplings_combined.py E,S,W,R trial1,trial2,trial3,trial4,trial5,trial6,trial7,trial8,trial9
+
+### Parameters
+The results of the article are obtained with the parameters reported in the examples. For convenience, the repository also contains four pre-trained networks, i.e.,
+``091122_train_[2 3]_val_[1]_test_[4]_E,S,W,R__bandw20_RU4_sampling1_network.h5``,
+``091122_train_[2 3]_val_[1]_test_[4]_E,S,W,R__bandw40_RU2_sampling1_network.h5``,
+``091122_train_[2 3]_val_[1]_test_[4]_E,S,W,R__bandw80_RU1_sampling1_network.h5``,
+``091122_train_[2 3]_val_[1]_test_[4]_E,S,W,R__bandw80_RU1_sampling3_network.h5``.
+
+### Python and relevant libraries version
+- Python >= 3.8.5
+- TensorFlow >= 2.7.0
+- NumPy >= 1.21.5
+- SciPy == 1.4.1
+- scikit-learn == 0.23.2
+- OSQP >= 0.6.1
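+
+For reference, a matching environment could be set up with pip, e.g. (a sketch assuming the standard PyPI package names; the plotting scripts additionally use matplotlib):
+```bash
+pip install "tensorflow>=2.7.0" "numpy>=1.21.5" "scipy==1.4.1" "scikit-learn==0.23.2" "osqp>=0.6.1" matplotlib
+```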
-The dataset that will be made available contains Wi-Fi channel frequency response (CFR) data collected in an IEEE 802.11ax network through [AX CSI](https://ans.unibs.it/projects/ax-csi/).
\ No newline at end of file
+## Contact
+Francesca Meneghello
+francesca.meneghello.1@unipd.it
+github.com/francescamen
\ No newline at end of file
diff --git a/input_files/processed_files/.placeholder b/input_files/processed_files/.placeholder
new file mode 100644
index 0000000..e69de29