Merge pull request #121 from matthew-schultz/74-log-using-debug-mode
Convert prints to logging using --verbose flag
carlosparadis authored Dec 18, 2019
2 parents 3710764 + c190837 commit 65609c5
Showing 4 changed files with 116 additions and 13 deletions.
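Each of the three data-acquisition scripts touched here now accepts a -v/--verbose switch, defined via argparse in the diffs below. A minimal usage sketch, assuming the script is run from its script directory the same way the crontab entries do:

    cd egauge/script && python3 api_egauge.py --verbose

Without the flag, only ERROR level messages are logged; with it, INFO level messages also go to the console and error.log.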
40 changes: 37 additions & 3 deletions egauge/script/api_egauge.py
@@ -11,8 +11,10 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

import argparse
import configparser
import logging
import logging.handlers
import orm_egauge
import os
import pandas
@@ -23,7 +25,38 @@


SCRIPT_NAME = os.path.basename(__file__)
logging.basicConfig(filename='error.log', format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')


def set_logging_settings():
"""
Configures logging so that ERROR messages are written to ./error.log and the console by default.
If the --verbose flag is set, INFO messages are also written to ./error.log and the console.
"""
# parser for script arguments like --verbose
parser = argparse.ArgumentParser(description='Get reading data from egauge api and insert into database.')
# --verbose argument is True if set
parser.add_argument('-v', '--verbose', action='store_true',
help='print INFO level log messages to console and error.log')
args = parser.parse_args()

# set log level to INFO only if verbose is set
if args.verbose:
log_level = 'INFO'
else:
log_level = 'ERROR'

# configure logger which will print log messages to console (only prints ERROR level messages by default; prints INFO level messages if --verbose flag is set)
logging.basicConfig(level=log_level, format=__file__ + ': %(message)s')

# Create a handler that writes log messages to error.log file
# rotates error.log every time it reaches 100 MB to limit space usage; keeps up to 5 old error.log files
rotating_file_handler = logging.handlers.RotatingFileHandler('error.log', maxBytes=100000000, backupCount=5)
# set the message and date format of file handler
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
rotating_file_handler.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(rotating_file_handler)


# connect to database by creating a session
@@ -99,7 +132,7 @@ def get_data_from_api(conn, query_string):
time_window = {'t': api_start_timestamp, 'f': current_timestamp}
request = requests.get(host, params=time_window)
if request.status_code == requests.codes.ok:
print('[' + str(current_time) + '] ' + 'Request was successful' + str(request))
logging.info('[' + str(current_time) + '] ' + 'Data acquisition API request was successful for ' + query_string)
readings = pandas.read_csv(StringIO(request.text))
readings = readings.sort_values(by='Date & Time')
# # Set header=False if we don't want to append header and set index=False to remove index column.
@@ -167,7 +200,7 @@ def insert_readings_into_database(conn, readings, purpose_sensors):
filter(orm_egauge.Reading.purpose_id == purpose_sensor.purpose_id,
orm_egauge.Reading.upload_timestamp == current_time).\
update({'log_id':error_log_row.log_id})
print(str(rows_inserted) + ' readings(s) attempted to be inserted by ' + SCRIPT_NAME)
logging.info(str(rows_inserted) + ' readings(s) attempted to be inserted by ' + SCRIPT_NAME + ' for purpose id ' + str(purpose_sensor.purpose_id))
conn.commit()


@@ -198,6 +231,7 @@ def log_failure_to_connect_to_database(conn, exception, purpose_sensors):


if __name__ == '__main__':
set_logging_settings()
# start the database connection
conn = get_db_handler()
# get a list of all unique query_string's for active egauges from sensor_info table
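A sketch of what the two handlers configured above produce when api_egauge.py is run with --verbose (the message and timestamp are illustrative; the formats come from the basicConfig call and the file handler's Formatter):

    console:    api_egauge.py: <log message>
    error.log:  12/18/2019 01:00:00 PM <log message>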
42 changes: 39 additions & 3 deletions hobo/script/extract_hobo.py
@@ -3,17 +3,51 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

import argparse
import configparser
import csv
import glob
import logging
import logging.handlers
import orm_hobo
import os
import pandas
import pendulum


logging.basicConfig(filename='error.log', format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
SCRIPT_NAME = os.path.basename(__file__)


def set_logging_settings():
"""
Configures logging so that ERROR messages are written to ./error.log and the console by default.
If the --verbose flag is set, INFO messages are also written to ./error.log and the console.
"""
# parser for script arguments like --verbose
parser = argparse.ArgumentParser(description='Get reading data from hobo csv files in project\'s hobo/script/to-insert folder and insert into database.')
# --verbose argument is True if set
parser.add_argument('-v', '--verbose', action='store_true',
help='print INFO level log messages to console and error.log')
args = parser.parse_args()

# set log level to INFO only if verbose is set
if args.verbose:
log_level = 'INFO'
else:
log_level = 'ERROR'

# configure logger which will print log messages to console (only prints ERROR level messages by default; prints INFO level messages if --verbose flag is set)
logging.basicConfig(level=log_level, format=__file__ + ': %(message)s')

# Create a handler that writes log messages to error.log file
# rotates error.log every time it reaches 100 MB to limit space usage; keeps up to 5 old error.log files
rotating_file_handler = logging.handlers.RotatingFileHandler('error.log', maxBytes=100000000, backupCount=5)
# set the message and date format of file handler
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
rotating_file_handler.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(rotating_file_handler)


def get_db_handler():
@@ -136,14 +170,14 @@ def insert_csv_readings_into_db(conn, csv_readings, csv_metadata, csv_filename):
if csv_readings.empty:
return
else:
print('readings extracted from csv')
logging.info('readings extracted from ' + csv_filename)
#executes if not initialized as a dataframe
elif not csv_readings:
logging.exception('csv_readings set to None')
return
new_readings, earliest_csv_timestamp, csv_modified_timestamp, query_string, latest_csv_timestamp, sensor_info_rows = csv_metadata
if not new_readings:
raise Exception("csv readings already inserted")
raise Exception('csv readings from ' + csv_filename + ' already inserted')
rows_returned = csv_readings.shape[0]
if rows_returned > 0:
for csv_reading in csv_readings.itertuples():
@@ -153,6 +187,7 @@ def insert_csv_readings_into_db(conn, csv_readings, csv_metadata, csv_filename):
last_reading_row_datetime = csv_reading[1]
#update last_updated_datetime column for relevant rows in sensor_info table
for sensor_info_row in sensor_info_rows:
logging.info('attempting to insert ' + str(rows_returned) + ' reading(s) for purpose_id ' + str(sensor_info_row.purpose_id))
# account for if csv files uploaded out of order by checking if last_reading_row_datetime is later than last_updated_datetime
if not sensor_info_row.last_updated_datetime or sensor_info_row.last_updated_datetime < last_reading_row_datetime:
conn.query(orm_hobo.SensorInfo.purpose_id).filter(orm_hobo.SensorInfo.purpose_id == sensor_info_row.purpose_id).update({"last_updated_datetime": last_reading_row_datetime})
@@ -221,6 +256,7 @@ def log_failure_to_insert_csv_readings_into_db(conn, csv_filename, csv_metadata,


if __name__=='__main__':
set_logging_settings()
conn = get_db_handler()
csv_filenames = glob.glob('./to-insert/*.csv')
for csv_filename in csv_filenames:
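A usage sketch for the hobo pipeline, assuming the CSV exports have been placed in the project's hobo/script/to-insert folder as the argparse description above states:

    cd hobo/script && python3 extract_hobo.py --verbose

Each file matched by glob('./to-insert/*.csv') in the main block is then extracted and inserted, with per-file INFO messages going to the console and error.log.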
2 changes: 1 addition & 1 deletion init_crontab.py
@@ -98,7 +98,7 @@
# and write outputs to crontab.txt in the */script directory
script_folder = script_name.split('_')[1].split('.py')[0]
job = cron.new(command='cd ' + project_path + '/' + script_folder + '/script && '
+ 'python3 ' + script_name + ' >> crontab.txt')
+ 'python3 ' + script_name)
# schedule init_crontab job if not already scheduled
else:
job = cron.new(command='cd ' + project_path + '/ && python3 ' + str(script_name) + ' --min=' + str(args.min))
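Since the scripts now write their own output to error.log and the console, the generated cron command no longer redirects stdout to crontab.txt; the resulting entry for, e.g., the egauge script would look roughly like this (project path illustrative):

    cd /path/to/project/egauge/script && python3 api_egauge.py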
45 changes: 39 additions & 6 deletions webctrl/script/api_webctrl.py
@@ -10,15 +10,48 @@
from sqlalchemy.orm import sessionmaker

# import json #used if we want to output json file
import argparse
import configparser
import logging
import logging.handlers
import orm_webctrl
import os
import pendulum
import requests


logging.basicConfig(filename='error.log', format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
SCRIPT_NAME = os.path.basename(__file__)

def set_logging_settings():
"""
Configures logging so that ERROR messages are written to ./error.log and the console by default.
If the --verbose flag is set, INFO messages are also written to ./error.log and the console.
"""
# parser for script arguments like --verbose
parser = argparse.ArgumentParser(description='Get reading data from webctrl api and insert into database.')
# --verbose argument is True if set
parser.add_argument('-v', '--verbose', action='store_true',
help='print INFO level log messages to console and error.log')
args = parser.parse_args()

# set log level to INFO only if verbose is set
if args.verbose:
log_level = 'INFO'
else:
log_level = 'ERROR'

# configure logger which will print log messages to console (only prints ERROR level messages by default; prints INFO level messages if --verbose flag is set)
logging.basicConfig(level=log_level, format=__file__ + ': %(message)s')

# Create a handler that writes log messages to error.log file
# rotates error.log every time it reaches 100 MB to limit space usage; keeps up to 5 old error.log files
rotating_file_handler = logging.handlers.RotatingFileHandler('error.log', maxBytes=100000000, backupCount=5)
# set the message and date format of file handler
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
rotating_file_handler.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(rotating_file_handler)


# connect to database by creating a session
@@ -60,8 +93,8 @@ def get_data_from_api(sensor, conn):
if not sensor.last_updated_datetime:
raise Exception('No last_updated_datetime found')
#get webctrl user information
#returns an IndexError if there is no webctrl user in database
webctrl_user_row = conn.query(orm_webctrl.ApiAuthentication.username, orm_webctrl.ApiAuthentication.password).filter_by(script_folder=orm_webctrl.SensorInfo.ScriptFolderEnum.webctrl)[0]
# returns a TypeError if there are no users in database
api_user = webctrl_user_row[0]
api_pass = webctrl_user_row[1]
host = 'http://www.soest.hawaii.edu/hneienergy/bulktrendserver/read'
@@ -75,7 +108,7 @@
auth = (api_user, api_pass)
readings = requests.post(host, params=params, auth=tuple(auth))
if readings.status_code == requests.codes.ok:
print('API request was successful' + str(readings))
logging.info('[' + str(current_time) + '] ' + 'Data acquisition API request was successful for ' + sensor.query_string)
error_log_row = orm_webctrl.ErrorLog(datetime=current_time, was_success=True, purpose_id=sensor.purpose_id, pipeline_stage=orm_webctrl.ErrorLog.PipelineStageEnum.data_acquisition)
conn.add(error_log_row)
conn.commit()
@@ -101,8 +134,7 @@ def insert_readings_into_database(conn, readings, sensor):
sensor_json_data = readings.json()
# query_string = sensor_json_data[0]['id']
readings = sensor_json_data[0]['s']
#TEST
print(str(len(readings)) + ' readings obtained', )
logging.info(str(len(readings)) + ' reading(s) obtained')
# reset this value (used to detect readings with duplicate timestamps) before start of reading insertion for loop
previous_reading_time = 0
for reading in readings:
@@ -148,7 +180,7 @@ def insert_readings_into_database(conn, readings, sensor):
orm_webctrl.Reading.upload_timestamp == current_time). \
update({'log_id': error_log_row.log_id})
conn.commit()
print(rows_inserted, ' row(s) inserted')
logging.info(str(rows_inserted) + ' row(s) inserted for purpose_id ' + str(sensor.purpose_id))


#log_failure_to_get_readings_from_webctrl_api
@@ -174,6 +206,7 @@ def log_failure_to_connect_to_database(conn, exception, sensor):


if __name__ == '__main__':
set_logging_settings()
# connect to the database
conn = get_db_handler()
sensors = conn.query(orm_webctrl.SensorInfo.purpose_id, orm_webctrl.SensorInfo.query_string, orm_webctrl.SensorInfo.last_updated_datetime, orm_webctrl.SensorInfo.unit).filter_by(script_folder=orm_webctrl.SensorInfo.ScriptFolderEnum.webctrl, is_active=True)
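A follow-up note on the rotating file handler used by all three scripts: with maxBytes=100000000 and backupCount=5, each script directory holds at most the active error.log plus five rolled-over copies, using the standard library's rotation naming:

    error.log  error.log.1  error.log.2  error.log.3  error.log.4  error.log.5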
