Automated autopep8 fixes
autopep8 bot committed Dec 17, 2024
1 parent 18e76f3 commit e2cebc2
Showing 3 changed files with 24 additions and 24 deletions.
benchmark/automl_forecasting.py (36 changes: 18 additions & 18 deletions)
@@ -443,13 +443,14 @@
 # Parameters
 # full_file_path_and_name - complete .tsf file path
 # replace_missing_vals_with - a term to indicate the missing values in series in the returning dataframe
-# value_column_name - Any name that is preferred to have as the name of the column containing series values in the returning dataframe
+# value_column_name - Any name that is preferred to have as the name of
+# the column containing series values in the returning dataframe
 def convert_tsf_to_dataframe(full_file_path_and_name, replace_missing_vals_with='NaN',
                              value_column_name='series_value'):
     try:
         with open(full_file_path_and_name, 'r', encoding='utf-8') as file:
             return parse_file(file, replace_missing_vals_with, value_column_name)
-    except:
+    except BaseException:
         with open(full_file_path_and_name, 'r', encoding='cp1252') as file:
             return parse_file(file, replace_missing_vals_with, value_column_name)

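The E722 fix above replaces the bare except with except BaseException, which catches exactly the same things; a manual follow-up would normally narrow it to the error the cp1252 fallback is actually meant to handle. A minimal sketch of that narrower variant (hypothetical, not part of this commit, reusing parse_file from the same file):

def convert_tsf_to_dataframe_narrow(full_file_path_and_name,
                                    replace_missing_vals_with='NaN',
                                    value_column_name='series_value'):
    # Hypothetical variant: catch only the decoding failure the cp1252
    # fallback is intended to cover, instead of BaseException.
    try:
        with open(full_file_path_and_name, 'r', encoding='utf-8') as file:
            return parse_file(file, replace_missing_vals_with, value_column_name)
    except UnicodeDecodeError:
        with open(full_file_path_and_name, 'r', encoding='cp1252') as file:
            return parse_file(file, replace_missing_vals_with, value_column_name)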
@@ -564,9 +565,8 @@ def parse_file(file, replace_missing_vals_with, value_column_name):
             full_info[i], '%Y-%m-%d %H-%M-%S'
         )
     else:
-        raise Exception(
-            'Invalid attribute type.'
-        )  # Currently, the code supports only numeric, string and date types. Extend this as required.
+        # Currently, the code supports only numeric, string and date types. Extend this as required.
+        raise Exception('Invalid attribute type.')

     if att_val is None:
         raise Exception('Invalid attribute value.')
@@ -672,8 +672,8 @@ def regression_scores(actual, predicted, y_train,
     if 'duration' in kwargs.keys():
         results['duration'] = kwargs['duration']

-    if scores_dir != None:
-        if forecaster_name == None:
+    if scores_dir is not None:
+        if forecaster_name is None:
             raise TypeError('Forecaster name required to save scores')
         os.makedirs(scores_dir, exist_ok=True)

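The E711 change (== None / != None rewritten as is None / is not None) is more than style once an operand overrides __eq__: the identity check always returns a single plain bool. A short standalone illustration, not taken from this repository:

import numpy as np

value = None
print(value is None)    # True: identity check, always one bool

arr = np.array([1, 2, 3])
print(arr is None)      # False
# 'arr == None' would instead be broadcast elementwise by NumPy and could not
# be used directly in an 'if' statement, which is why PEP 8 prefers 'is None'.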
@@ -732,7 +732,7 @@ def correlation(actual, predicted, method='pearson'):
     try:
         correlation = result.correlation
         pvalue = result.pvalue
-    except:  # older scipy versions returned a tuple instead of an object
+    except BaseException:  # older scipy versions returned a tuple instead of an object
         correlation = result[0]
         pvalue = result[1]

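The broad except here only exists to handle SciPy releases whose correlation functions return a plain tuple rather than a result object. A hypothetical helper (name and structure are illustrative only) that feature-tests the result instead of relying on an exception:

def extract_corr_and_pvalue(result):
    # Works for both a plain (correlation, pvalue) tuple from older SciPy
    # releases and a result object exposing .pvalue; both forms are indexable.
    if hasattr(result, 'pvalue'):
        return getattr(result, 'correlation', result[0]), result.pvalue
    return result[0], result[1]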
@@ -779,12 +779,12 @@ def write_to_csv(path, results):
     HEADERS.insert(0, HEADERS.pop(HEADERS.index('model')))

     for key, value in results.items():
-        if value == None or value == '':
+        if value is None or value == '':
             results[key] = 'None'

     try:
         Utils._write_to_csv(path, results, HEADERS)
-    except OSError as _:
+    except OSError:
         # try a second time: permission error can be due to Python not
         # having closed the file fast enough after the previous write
         time.sleep(1)  # in seconds
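The except OSError branch above retries the write once after a one-second sleep, on the assumption that the previous write has not yet released the file. A generic sketch of that retry pattern (the helper name, attempt count and delay are made up): it could wrap the existing call as write_with_retry(lambda: Utils._write_to_csv(path, results, HEADERS)).

import time

def write_with_retry(write_fn, attempts=2, delay=1.0):
    # Call write_fn(), retrying on OSError in case the file handle from the
    # previous write has not been released yet.
    for attempt in range(attempts):
        try:
            return write_fn()
        except OSError:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)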
@@ -847,25 +847,25 @@ def save_plot(title,
     :param yscale: Y-Scale ('linear' or 'log'), defaults to 'linear'
     """

-    if xlabel != None:
+    if xlabel is not None:
         plt.xlabel(xlabel)

-    if ylabel != None:
+    if ylabel is not None:
         plt.ylabel(ylabel)

     plt.yscale(yscale)

     plt.title(title)
     plt.suptitle(suptitle)

-    if legend != None:
+    if legend is not None:
         plt.legend(legend, loc='upper left')

     # Show plot
     if show:
         plt.show()
     # Show plot as file
-    if save_path != None:
+    if save_path is not None:
         plt.savefig(save_path, bbox_inches='tight')

     # Clear for next plot
@@ -910,7 +910,7 @@ def split_test_set(test_df, horizon):
     for _ in range(0, len(test_df) - 1, horizon):  # The -1 is because the last split may be less than horizon
         try:
             test_splits.append(test_df.iloc[total:total + horizon, :])
-        except:  # If 1D (series)
+        except BaseException:  # If 1D (series)
             test_splits.append(test_df.iloc[total:total + horizon])
         total += horizon

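The try/except in split_test_set distinguishes a 2-D DataFrame from a 1-D Series by letting the column slice fail. A sketch of an alternative (hypothetical rewrite, not part of this commit) that branches on the type instead of using a broad handler:

import pandas as pd

def split_test_set_by_type(test_data, horizon):
    # Hypothetical rewrite: branch on the object's type instead of letting
    # a 1-D Series raise inside a broad except clause.
    test_splits = []
    for total in range(0, len(test_data), horizon):
        if isinstance(test_data, pd.DataFrame):
            test_splits.append(test_data.iloc[total:total + horizon, :])
        else:  # pandas Series
            test_splits.append(test_data.iloc[total:total + horizon])
    return test_splits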
@@ -1160,11 +1160,11 @@ def save_heatmap(df, csv_path, png_path):
     heatmap.to_csv(csv_path)  # Save correlations as CSV
     heatmap.to_latex(csv_path.replace('.csv', '.tex'))  # Save correlations as .tex
     try:
-        calculate_pvalues = lambda x, y: pearsonr(x, y).pvalue
+        def calculate_pvalues(x, y): return pearsonr(x, y).pvalue
         df[columns].corr(method=calculate_pvalues).to_csv(csv_path.replace('.csv', '_pvalues.csv'))
     # older scipy versions return a tuple instead of an object
-    except:
-        calculate_pvalues = lambda x, y: pearsonr(x, y)[1]
+    except BaseException:
+        def calculate_pvalues(x, y): return pearsonr(x, y)[1]
         df[columns].corr(method=calculate_pvalues).to_csv(csv_path.replace('.csv', '_pvalues.csv'))

     # Save correlation heatmap as image
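The E731 rewrite above turns assigned lambdas into named functions with the same behaviour; pandas accepts any callable mapping two 1-D arrays to a scalar as DataFrame.corr(method=...). A small self-contained example of that combination (the data values are made up):

import pandas as pd
from scipy.stats import pearsonr

def pearson_pvalue(x, y):
    # Named function instead of an assigned lambda (PEP 8 E731):
    # returns only the p-value of the Pearson correlation.
    return pearsonr(x, y)[1]

df = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [1.1, 1.9, 3.2, 3.8]})
pvalues = df.corr(method=pearson_pvalue)  # pairwise p-values instead of r
print(pvalues)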
benchmark/benchmark_TSF.py (4 changes: 2 additions & 2 deletions)
@@ -70,9 +70,9 @@ def evaluate_loop(self, dataset, experiment_setup: dict = None):
         experiment_setup['task_params'] = TsForecastingParams(
             forecast_length=M4_FORECASTING_LENGTH[dataset[0]])
         target = train_data.iloc[-experiment_setup['task_params']
-                                 .forecast_length:, :].values.ravel()
+                                 .forecast_length:, :].values.ravel()
         train_data = train_data.iloc[:-
-                                     experiment_setup['task_params'].forecast_length, :]
+                                     experiment_setup['task_params'].forecast_length, :]
         model = FedotIndustrial(**experiment_setup)
         model.fit(train_data)
         prediction = model.predict(train_data)
benchmark/feature_utils.py (8 changes: 4 additions & 4 deletions)
@@ -429,7 +429,7 @@ def format_univariate_forecasting_data(data_dir, return_df: bool = False):
         meta_data['step_size'] = []
         try:
             files = os.listdir(data_dir)
-        except:
+        except BaseException:
             files = os.listdir(os.path.join(PROJECT_PATH, data_dir))

         csv_files = [f for f in files if '0_metadata.csv' not in f and f.endswith('csv')]
@@ -510,7 +510,7 @@ def format_global_forecasting_data(data_dir, gather_metadata=False):
             data, freq, horizon, has_nans, equal_length = convert_tsf_to_dataframe(
                 os.path.join(data_dir, tsf_file), 'NaN', 'value')

-            if horizon == None:
+            if horizon is None:
                 horizon = DatasetFormatting.select_horizon(freq, csv_path)

             if gather_metadata:
@@ -568,7 +568,7 @@ def select_horizon(freq, csv_path):
         # The following horizons are suggested by Godahewa et al. (2021)
         elif 'solar_weekly_dataset' in csv_path:
             horizon = 5
-        elif freq == None:
+        elif freq is None:
             raise ValueError('No frequency or horizon found in file')
         elif freq == 'monthly':
             horizon = 12
@@ -684,7 +684,7 @@ def format_3W_data(data_dir):
         :param data_dir: Path to directory of datasets
         """
-        subdir = os.path.join(data_dir, '3W')
+        os.path.join(data_dir, '3W')

     @staticmethod
     def format_falling_data(data_dir):
