diff --git a/src/common.py b/src/common.py index cddcb42..eba50e7 100644 --- a/src/common.py +++ b/src/common.py @@ -4,7 +4,19 @@ import datetime -db_file = 'db.csv' +DB_FILE = 'db.csv' + + +LOG_LEVELS = { + 'NONE': 0, + 'ERROR': 1, + 'WARNING': 2, + 'SUCCESS': 3, + 'INFO': 4, +} + + +LOG_LEVEL = LOG_LEVELS['INFO'] def read_csv(filename): @@ -47,7 +59,8 @@ def log_happy(msg): Args: msg (str): Happy message """ - print(f'{colorama.Fore.GREEN}{datetime.datetime.now()} [SUCCESS] {msg}{colorama.Style.RESET_ALL}') + if LOG_LEVEL >= LOG_LEVELS['SUCCESS']: + print(f'{colorama.Fore.GREEN}{datetime.datetime.now()} [SUCCESS] {msg}{colorama.Style.RESET_ALL}') def log_info(msg): @@ -56,7 +69,8 @@ def log_info(msg): Args: msg (str): Info message """ - print(f'{colorama.Style.RESET_ALL}{datetime.datetime.now()} [INFO] {msg}{colorama.Style.RESET_ALL}') + if LOG_LEVEL >= LOG_LEVELS['INFO']: + print(f'{colorama.Style.RESET_ALL}{datetime.datetime.now()} [INFO] {msg}{colorama.Style.RESET_ALL}') def log_warning(msg): @@ -65,7 +79,8 @@ def log_warning(msg): Args: msg (str): Warning message """ - print(f'{colorama.Fore.YELLOW}{datetime.datetime.now()} [WARNING] {msg}{colorama.Style.RESET_ALL}') + if LOG_LEVEL >= LOG_LEVELS['WARNING']: + print(f'{colorama.Fore.YELLOW}{datetime.datetime.now()} [WARNING] {msg}{colorama.Style.RESET_ALL}') def log_error(msg): @@ -74,4 +89,5 @@ def log_error(msg): Args: msg (str): Error message """ - print(f'{colorama.Fore.RED}{datetime.datetime.now()} [ERROR] {msg}{colorama.Style.RESET_ALL}') \ No newline at end of file + if LOG_LEVEL >= LOG_LEVELS['ERROR']: + print(f'{colorama.Fore.RED}{datetime.datetime.now()} [ERROR] {msg}{colorama.Style.RESET_ALL}') \ No newline at end of file diff --git a/src/db_console.py b/src/db_console.py index 4d994fe..ee23110 100644 --- a/src/db_console.py +++ b/src/db_console.py @@ -3,16 +3,16 @@ def drop_by_ip(ip): - data = read_csv(db_file) + data = read_csv(DB_FILE) data = [x for x in data if x[0] != ip] - write_csv(data, 
db_file) + write_csv(data, DB_FILE) log_happy(f"Deleted {ip}") def drop_by_hostname(hostname): - data = read_csv(db_file) + data = read_csv(DB_FILE) data = [x for x in data if x[1] != hostname] - write_csv(data, db_file) + write_csv(data, DB_FILE) log_happy(f"Deleted {hostname}") @@ -23,10 +23,10 @@ def insert_entry(ip, hostname, comment): return if not comment: comment = "" - data = read_csv(db_file) + data = read_csv(DB_FILE) new_data = pd.DataFrame([[ip, hostname, comment]], columns=["ipv4", "hostname", "comment"]) data = pd.concat([data, new_data], ignore_index=True) - write_csv(data, db_file) + write_csv(data, DB_FILE) log_happy(f"Inserted \"{ip}\",\"{hostname}\",\"{comment}\"") @@ -39,13 +39,13 @@ def insert_ip_list(filename, hostname, comment): if not filename: log_warning("Filename is required") return - data = read_csv(db_file) + data = read_csv(DB_FILE) new_data = pd.read_csv(filename, header=None, names=["ipv4"]) new_data["hostname"] = hostname new_data["comment"] = comment log_info(f"Inserting {len(new_data)} IPs from {filename} with hostname \"{hostname}\" and comment \"{comment}\"") data = pd.concat([data, new_data], ignore_index=True) - write_csv(data, db_file) + write_csv(data, DB_FILE) log_happy(f"Inserted IPs from {filename}") @@ -62,7 +62,7 @@ def drop(parameters, arguments): if 'comment' in parameters and 'comment-part' in parameters: log_warning("Cannot use both --comment and --comment-part") return - data_pick = read_csv(db_file) + data_pick = read_csv(DB_FILE) if 'ipv4' in parameters: data_pick = data_pick[data_pick['ipv4'] == arguments['ipv4']] if 'hostname' in parameters: @@ -71,7 +71,7 @@ data_pick = data_pick[data_pick['comment'] == arguments['comment']] if 'comment-part' in parameters: data_pick = data_pick[data_pick['comment'].fillna('').str.contains(arguments['comment-part'])] - data_drop = read_csv(db_file) + data_drop = read_csv(DB_FILE) log_info(f"Dropping {len(data_pick)} entries") ans = None if 
len(data_pick) != 0: @@ -81,7 +81,7 @@ def drop(parameters, arguments): log_info("Aborted") return data_drop = data_drop[~data_drop.isin(data_pick)].dropna() - write_csv(data_drop, db_file) + write_csv(data_drop, DB_FILE) log_happy(f"Dropped {len(data_pick)} entries") diff --git a/src/make_amnezia.py b/src/make_amnezia.py index 4edc909..ce215a7 100644 --- a/src/make_amnezia.py +++ b/src/make_amnezia.py @@ -1,7 +1,9 @@ from common import * + def make_amnezia(): - data = read_csv(db_file) + log_info('make_amnezia: Starting') + data = read_csv(DB_FILE) # use ipv4 as hostname data['hostname'] = data['ipv4'] # save only hostnames @@ -9,6 +11,8 @@ def make_amnezia(): json_data = data.to_dict(orient='records') write_json(json_data, 'out/amnezia_vpn.json') log_happy('Amnezia VPN tunneling file created') + log_info('make_amnezia: Finished') + if __name__ == '__main__': make_amnezia() \ No newline at end of file diff --git a/src/maskt2ipv4.py b/src/maskt2ipv4.py index 3be2360..16152f1 100644 --- a/src/maskt2ipv4.py +++ b/src/maskt2ipv4.py @@ -2,9 +2,10 @@ from common import * def maskt2ipv4(): + log_info('maskt2ipv4: Starting') cidr_count = 0 converted_ip_count = 0 - data = read_csv(db_file) + data = read_csv(DB_FILE) # if ipv4 contains / then it is a masked ip range masked = data[data['ipv4'].str.contains('/')] # convert masked ip range to list of ip addresses @@ -24,8 +25,10 @@ def maskt2ipv4(): data = data[~data['ipv4'].str.contains('/')] # append ip list to data data = pd.concat([data, pd.DataFrame(ip_list)], ignore_index=True) - write_csv(data, db_file) + write_csv(data, DB_FILE) log_happy('Converted {} masked IP ranges to {} IP addresses'.format(cidr_count, converted_ip_count)) + log_info('maskt2ipv4: Finished') + if __name__ == '__main__': maskt2ipv4() diff --git a/src/sort_db.py b/src/sort_db.py index 1dc2481..50435e0 100644 --- a/src/sort_db.py +++ b/src/sort_db.py @@ -4,6 +4,7 @@ from os import listdir from os.path import isfile, join + def drop_duplicates(data): 
log_info('drop_duplicates: Starting') # if there is ip address and CIDR that contains that address, remove solo ip address @@ -24,15 +25,17 @@ def drop_duplicates(data): log_info('drop_duplicates: Finished') return data + def sort_db(): log_info('sort_db: Starting') - data = read_csv(db_file) + data = read_csv(DB_FILE) data = data.sort_values(by=['hostname']) data = drop_duplicates(data) - write_csv(data, db_file) + write_csv(data, DB_FILE) log_happy('Database sorted') log_info('sort_db: Finished') + def drop_duplicates_in_known(data): log_info("drop_duplicates_in_known: Starting") original_data = data.copy() @@ -61,6 +64,7 @@ def drop_duplicates_in_known(data): log_info("drop_duplicates_in_known: Finished") return data + def sort_known(): log_info("sort_known: Starting") onlyfiles = [f for f in listdir('in/known') if isfile(join('in/known', f))] diff --git a/src/sort_readme.py b/src/sort_readme.py index 5592262..635f5ab 100644 --- a/src/sort_readme.py +++ b/src/sort_readme.py @@ -2,7 +2,9 @@ # catch
and
in README.md # sort the lines between them alphabetically + def sort_readme(): + log_info('sort_readme: Starting') with open('README.md', 'r') as f: readme = f.readlines() start = readme.index('
\n') + 1 @@ -11,6 +13,8 @@ def sort_readme(): with open('README.md', 'w') as f: f.writelines(sorted_readme) log_happy('README.md sorted') + log_info('sort_readme: Finished') + if __name__ == '__main__': sort_readme() \ No newline at end of file