main.py
# A program to produce sentiment analysis output data on Google Play Store reviews using VADER Sentiment Analysis
from time import sleep

from authors.appWithAuthorsAnalysis import analyzeAuthors
from authors.authorsExtractor import extractAuthors
from authors.frequency import frequencyAuthors
from fileNames import files
from sentimentAnalysis import sentiment
def main():
    # Root directory containing the Google Play Store review datasets
    mainPath = '/Users/jaime/Documents/York_University/Winter_2021/Data_Vizualization/Project/gitlab/datasets/Google'
    # Collect the names and paths of every dataset file under the root directory
    readFile = files(mainPath)

    # Output file for the per-app sentiment statistics
    statisticFile = open('/Users/jaime/Documents/York_University/Winter_2021/Data_Vizualization/Project/sentiment_statistics.csv', 'w', encoding='utf8')
    # Output file for the extracted author names
    # authorsFile = open('/Users/jaime/Documents/York_University/Winter_2021/Data_Vizualization/Project/Names/names.csv', 'w', encoding='utf8')
    # Previously extracted author names, opened for reading
    authorsFileReading = open('/Users/jaime/Documents/York_University/Winter_2021/Data_Vizualization/Project/Names/names.csv', 'r', encoding='utf8')
    # Output file for the final author analysis
    # authorsFinalAnalysis = open('/Users/jaime/Documents/York_University/Winter_2021/Data_Vizualization/Project/Names/finalAuthors.csv', 'w', encoding='utf8')

    # ----------------- sentiment analysis --------------------------
    # Write the CSV header
    statisticFile.write('App,App ID,Reviews,neg,neu,pos,Sentiment\n')
    # Run the sentiment function on every application with English reviews to produce the csv output files
    for i in range(len(readFile[0])):
        sentiment(readFile[0][i], readFile[1][i], statisticFile)

    # ----------------- step needed to get all authors -----------------------------------
    # Extract the author names to build the network
    # for i in range(len(readFile[0])):
    #     extractAuthors(readFile[0][i], authorsFile)

    # -------------- for generating a csv with apps and authors --------------------------
    # authorListAux = []
    #
    # for i in authorsFileReading:
    #     authorListAux.append(i[0: len(i) - 1])
    #
    # authorListAux.sort()
    #
    # # adds the common apps
    # analyzeAuthors(authorListAux)
    #
    # sleep(4)
    # # count how often each author appears
    # frequencyAuthors()

    statisticFile.close()
    authorsFileReading.close()


# Run main
if __name__ == "__main__":
    main()
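
The sentiment helper imported from sentimentAnalysis is not shown in this file. Below is a minimal sketch of what a VADER-based implementation could look like; it assumes readFile[0] holds app names, readFile[1] holds CSV paths, each CSV has a 'content' column with the review text, and the vaderSentiment package is installed. The App ID column is filled with the file path purely as a placeholder. All of these are assumptions; the project's actual helper may differ.

import pandas
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer


def sentiment(appName, filePath, statisticFile):
    # Load the reviews for one application; the 'content' column is assumed to hold the review text
    reviews = pandas.read_csv(filePath)
    analyzer = SentimentIntensityAnalyzer()

    negTotal = neuTotal = posTotal = compoundTotal = 0.0
    count = 0
    for text in reviews['content'].dropna():
        scores = analyzer.polarity_scores(str(text))
        negTotal += scores['neg']
        neuTotal += scores['neu']
        posTotal += scores['pos']
        compoundTotal += scores['compound']
        count += 1

    if count == 0:
        return

    # Average the scores across all reviews and label the app using the standard VADER
    # compound-score thresholds (>= 0.05 positive, <= -0.05 negative, otherwise neutral)
    avgCompound = compoundTotal / count
    label = 'positive' if avgCompound >= 0.05 else 'negative' if avgCompound <= -0.05 else 'neutral'
    statisticFile.write(f'{appName},{filePath},{count},'
                        f'{negTotal / count:.3f},{neuTotal / count:.3f},{posTotal / count:.3f},{label}\n')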