etl.py
import os
import glob

import pandas as pd
import psycopg2

from sql_queries import *
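
# `from sql_queries import *` is expected to provide the prepared SQL statements
# used below: song_table_insert, artist_table_insert, time_table_insert,
# user_table_insert, songplay_table_insert and song_select. As an illustration
# only (the real definitions live in sql_queries.py), song_select is assumed to
# look roughly like:
#
#     song_select = """
#         SELECT s.song_id, s.artist_id
#         FROM songs s
#         JOIN artists a ON s.artist_id = a.artist_id
#         WHERE s.title = %s AND a.name = %s AND s.duration = %s
#     """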

def process_song_file(cur, filepath):
    """
    This procedure processes a song file whose filepath has been provided as an argument.
    It extracts the song information in order to store it into the songs table.
    Then it extracts the artist information in order to store it into the artists table.

    INPUTS:
    * cur : the cursor variable
    * filepath : the file path to the song file

    RETURNS: None
    """
    # open song file
    df = pd.read_json(filepath, lines=True)

    # insert song record
    song_data = list(df[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0])
    cur.execute(song_table_insert, song_data)

    # insert artist record
    artist_data = list(df[['artist_id', 'artist_name', 'artist_location',
                           'artist_latitude', 'artist_longitude']].values[0])
    cur.execute(artist_table_insert, artist_data)
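
# A song file contains one JSON object per line; the fields read above imply a
# record shaped roughly like this (values are made up for illustration):
#
#     {"song_id": "SOXXXXX12AB0189876", "title": "Some Song", "year": 0,
#      "duration": 218.93179, "artist_id": "ARXXXXX1187B98E1F3",
#      "artist_name": "Some Artist", "artist_location": "",
#      "artist_latitude": null, "artist_longitude": null}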

def process_log_file(cur, filepath):
    """
    This procedure processes a log file whose filepath has been provided as an argument.
    It extracts the timestamp information, converts it to datetime, and stores it in the time table.
    Then it extracts the user information and stores it in the users table.
    Finally, it looks up song_id and artist_id in the songs and artists tables
    (matching on song title, artist name and duration) and inserts each
    songplay record into the songplays table.

    INPUTS:
    * cur : the cursor variable
    * filepath : the file path to the log file

    RETURNS: None
    """
    # open log file
    df = pd.read_json(filepath, lines=True)

    # filter by NextSong action
    df = df.loc[df['page'] == "NextSong"]

    # convert timestamp column (milliseconds since the epoch) to datetime
    t = pd.to_datetime(df['ts'], unit='ms')

    # insert time data records (the timestamp column keeps the original millisecond value)
    time_data = (df['ts'], t.dt.hour, t.dt.day, t.dt.isocalendar().week,
                 t.dt.month, t.dt.year, t.dt.weekday)
    column_labels = ("timestamp", "hour", "day", "week_of_year", "month", "year", "weekday")
    time_df = pd.DataFrame(dict(zip(column_labels, time_data)))

    for i, row in time_df.iterrows():
        cur.execute(time_table_insert, list(row))

    # load user table
    user_df = df[['userId', 'firstName', 'lastName', 'gender', 'level']]

    # insert user records
    for i, row in user_df.iterrows():
        cur.execute(user_table_insert, row)

    # insert songplay records
    for index, row in df.iterrows():
        # get songid and artistid from song and artist tables
        cur.execute(song_select, (row.song, row.artist, row.length))
        results = cur.fetchone()

        if results:
            songid, artistid = results
        else:
            songid, artistid = None, None

        # insert songplay record
        songplay_data = (row.ts, row.userId, row.level, songid, artistid,
                         row.sessionId, row.location, row.userAgent)
        cur.execute(songplay_table_insert, songplay_data)
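
# The log-file fields referenced above are: ts, page, userId, firstName, lastName,
# gender, level, song, artist, length, sessionId, location and userAgent.
# An illustrative record (values made up) might look like:
#
#     {"ts": 1541903636796, "page": "NextSong", "userId": "39", "level": "free",
#      "song": "Some Song", "artist": "Some Artist", "length": 218.93179,
#      "firstName": "Jane", "lastName": "Doe", "gender": "F", "sessionId": 139,
#      "location": "San Jose, CA", "userAgent": "Mozilla/5.0"}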

def process_data(cur, conn, filepath, func):
    """
    This procedure collects all JSON files under the given directory and
    processes each one with the provided function, committing after every file.

    INPUTS:
    * cur : the cursor variable
    * conn : the psycopg2 connection
    * filepath : the root directory containing the song/log files
    * func : the function used to process each song/log file

    RETURNS: None
    """
    # get all files matching extension from directory
    all_files = []
    for root, dirs, files in os.walk(filepath):
        files = glob.glob(os.path.join(root, '*.json'))
        for f in files:
            all_files.append(os.path.abspath(f))

    # get total number of files found
    num_files = len(all_files)
    print('{} files found in {}'.format(num_files, filepath))

    # iterate over files and process
    for i, datafile in enumerate(all_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(i, num_files))
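
# process_data only relies on `func(cur, datafile)`, so either dataset can be
# (re)processed on its own, e.g. just the log files:
#
#     process_data(cur, conn, filepath='data/log_data', func=process_log_file)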

def main():
    """
    Connect to the sparkifydb database, process the song data and then the
    log data, and close the connection.
    """
    conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
    cur = conn.cursor()

    process_data(cur, conn, filepath='data/song_data', func=process_song_file)
    process_data(cur, conn, filepath='data/log_data', func=process_log_file)

    conn.close()


if __name__ == "__main__":
    main()
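
# Typical usage, assuming the sparkifydb database and its tables already exist
# (e.g. created beforehand by a separate table-creation script):
#
#     python etl.py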