diff --git a/processor/DldProcessor.py b/processor/DldProcessor.py
index be8a3f4..96a23f3 100644
--- a/processor/DldProcessor.py
+++ b/processor/DldProcessor.py
@@ -301,6 +301,7 @@ def readDataframes(self, fileName=None, path=None, format='parquet'):
         else:
             fileName = 'run{}'.format(self.runNumber)
         fullName = path + fileName  # TODO: test if naming is correct
+        print(f'Loading {format} data from {fileName}')

         if format == 'parquet':
             self.dd = dask.dataframe.read_parquet(fullName + "_el")
@@ -316,7 +317,7 @@ def readDataframes(self, fileName=None, path=None, format='parquet'):
             self.ddMicrobunches = dask.dataframe.read_hdf(
                 fullName, '/microbunches', mode='r', chunksize=self.CHUNK_SIZE)
         self.printRunOverview()
-        print(f'Loaded data form {format} file')
+        print(f'Loading complete.')

     def appendDataframeParquet(self, fileName, path=None):
         """ Append data to an existing dask Parquet dataframe.
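
Usage note (a minimal sketch, not part of the patch): assuming the class is exposed as DldProcessor with a no-argument constructor and that runNumber is set before loading, the new print statements bracket the read so long-running loads are easier to follow. The import path, run number, and data path below are hypothetical.

    from processor.DldProcessor import DldProcessor  # assumed import path, matching the file location

    proc = DldProcessor()           # assumes a no-argument constructor
    proc.runNumber = 12345          # hypothetical run number
    proc.readDataframes(path='/some/parquet/dir/', format='parquet')
    # prints "Loading parquet data from run12345", then the run overview,
    # and finally "Loading complete." once both dataframes are available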