@@ -39,22 +39,27 @@ class JsonImporter(AbstractImporter):
     def import_data(self):
         raw_data = self.read_json_file()
+        self.import_variables(raw_data)
         self.import_trajectories(raw_data)
         self.compute_row_delta_in_all_samples_frames(self.time_key)
         self.clear_data_frame_list()
         self.import_structure(raw_data)
-        self.import_variables(raw_data, self.sorter)
+        #self.import_variables(raw_data, self.sorter)
 
     def import_trajectories(self, raw_data: pd.DataFrame):
         self.normalize_trajectories(raw_data, 0, self.samples_label)
 
     def import_structure(self, raw_data: pd.DataFrame):
         self._df_structure = self.one_level_normalizing(raw_data, 0, self.structure_label)
 
-    #TODO Warning: the variable order is not alphabetical as in the dataset -> act accordingly
-    def import_variables(self, raw_data: pd.DataFrame, sorter: typing.List):
+    #Sorting the variables alphabetically
+    def import_variables(self, raw_data: pd.DataFrame):
         self._df_variables = self.one_level_normalizing(raw_data, 0, self.variables_label)
+        self.sorter = self._df_variables[self.variables_key].to_list()
+        self.sorter.sort()
+        print("Sorter:", self.sorter)
         self._df_variables[self.variables_key] = self._df_variables[self.variables_key].astype("category")
-        self._df_variables[self.variables_key] = self._df_variables[self.variables_key].cat.set_categories(sorter)
+        self._df_variables[self.variables_key] = self._df_variables[self.variables_key].cat.set_categories(self.sorter)
         self._df_variables = self._df_variables.sort_values([self.variables_key])
 
     def read_json_file(self) -> typing.List:
@@ -105,7 +110,7 @@ class JsonImporter(AbstractImporter):
         self.df_samples_list = [pd.DataFrame(sample) for sample in raw_data[indx][trajectories_key]]
         #for sample_indx, sample in enumerate(raw_data[indx][trajectories_key]):
             #self.df_samples_list.append(pd.DataFrame(sample))
-        self.sorter = list(self.df_samples_list[0].columns.values)[1:]
+        #self.sorter = list(self.df_samples_list[0].columns.values)[1:] #TODO the NAME column, sorted alphabetically, has to go here
 
     def compute_row_delta_sigle_samples_frame(self, sample_frame: pd.DataFrame, time_header_label: str,
                                               columns_header: typing.List, shifted_cols_header: typing.List) \
@@ -122,10 +127,19 @@ class JsonImporter(AbstractImporter):
         #columns_header = list(self.df_samples_list[0].columns.values)
         #self.sorter = columns_header[1:]
         shifted_cols_header = [s + "S" for s in self.sorter]
-        for indx, sample in enumerate(self.df_samples_list):
-            self.df_samples_list[indx] = self.compute_row_delta_sigle_samples_frame(sample,
-                time_header_label, self.sorter, shifted_cols_header)
+        compute_row_delta = self.compute_row_delta_sigle_samples_frame
+        """for indx, sample in enumerate(self.df_samples_list):
+            self.df_samples_list[indx] = self.compute_row_delta_sigle_samples_frame(sample,
+                time_header_label, self.sorter, shifted_cols_header)"""
+        self.df_samples_list = [compute_row_delta(sample, time_header_label, self.sorter, shifted_cols_header) for sample in self.df_samples_list]
         self._concatenated_samples = pd.concat(self.df_samples_list)
+        #TODO Warning: the column at index 0 is not always the time column; sort the concatenated dataframe accordingly
+        complete_header = self.sorter[:]
+        complete_header.insert(0,'Time')
+        complete_header.extend(shifted_cols_header)
+        print("Complete Header", complete_header)
+        self._concatenated_samples = self._concatenated_samples[complete_header]
+        print("Concat Samples",self._concatenated_samples)
 
     def build_list_of_samples_array(self, data_frame: pd.DataFrame) -> typing.List:
         """
@@ -152,7 +166,7 @@ class JsonImporter(AbstractImporter):
         self._concatenated_samples = self._concatenated_samples.iloc[0:0]
 
     def clear_data_frame_list(self):
-        for indx in range(len(self.df_samples_list)): # the individual trajectories are no longer needed
+        for indx in range(len(self.df_samples_list)): # the individual trajectories are no longer needed #TODO use a list comprehension
             self.df_samples_list[indx] = self.df_samples_list[indx].iloc[0:0]
 
     def import_sampled_cims(self, raw_data: pd.DataFrame, indx: int, cims_key: str) -> typing.Dict:
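
Note on the variable-ordering change in `import_variables` (first hunk): the patch pins the row order by turning the variables column into a pandas categorical whose categories are the alphabetically sorted `self.sorter` list, then calling `sort_values`. The snippet below is a minimal, self-contained sketch of that pandas idiom, not code from this repository; the column name `"Name"` and the sample values are assumptions for illustration only.

```python
import pandas as pd

# Minimal sketch (not from the repo): impose an explicit ordering on a column
# via a categorical, as import_variables now does with
# self._df_variables[self.variables_key] and self.sorter.
df = pd.DataFrame({"Name": ["X", "Z", "Y"], "cardinality": [3, 2, 2]})  # assumed example data

sorter = df["Name"].to_list()
sorter.sort()  # alphabetical, mirroring self.sorter.sort() in the patch

df["Name"] = df["Name"].astype("category")
df["Name"] = df["Name"].cat.set_categories(sorter)  # the category list defines the sort order
df = df.sort_values(["Name"])

print(df)  # rows come out in the order X, Y, Z
```

Because `sorter` is itself sorted alphabetically here, the categorical step currently gives the same result as a plain lexicographic `sort_values`; it only starts to matter if `self.sorter` is later taken from a different source, such as the trajectory column order flagged in the TODOs.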