-
Notifications
You must be signed in to change notification settings - Fork 12
/
preprocessing.py
326 lines (305 loc) · 12.8 KB
/
preprocessing.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
from __future__ import division
from nltk.stem.snowball import *
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.sentiment.util import *
from nltk import tokenize
from datetime import datetime
import nltk
import numpy as np
import string
import csv
import sys
import os
"""
TODO: STORE SENTIMENT INFO IN A WAY THAT ALLOWS US TO CREATE RUNNING SENTIMENT GRAPHS
- Cut text into 20 or so roughly equal sized chunks of sentences
- Store mean positive and negative sentiment values for each segment as a feature
- Create graphs of sentiment movement for data viz (upper bound == max value observed, lower bound == 0)
"""
# Record start time so total runtime can be reported at script exit.
startTime = datetime.now()
#need this or else it throws encoding/decoding errors
# NOTE(review): reload(sys)/setdefaultencoding is a Python 2-only hack to
# force utf8 as the implicit codec; it has no Python 3 equivalent.
reload(sys)
sys.setdefaultencoding('utf8')
# Punctuation marks counted as features (one output column each).
punct = set(['!', '#', '"', '%', '$', '&', '(', '+', '*', '-', ',', '/', '.', ';', ':', '=', '<', '?', '>', '@', '[', ']', '_', '^', '`', '{', '~'])
# Identity translation table (Python 2 string.maketrans).
# NOTE(review): `table` is never used in the visible code — possibly dead.
table = string.maketrans("","")
# All feature rows are appended to this bar-delimited output file.
target = open("output_POS.txt", 'w')
#check avg sent size
# Header row: one column per feature, in the exact order the feature
# functions below write their values.
target.write("book_name|total_words|avg_sentence_size|!|#|''|%|$|&|(|+|*|-|,|/|.|;|:|=|<|?|>|@|[|]|_|^|`|{|~|neg|neu|pos|compound|ID|Title|Author|CC|CD|DT|EX|FW|IN|JJ|JJR|JJS|LS|MD|NN|NNP|NNPS|NNS|PDT|PRP|PRP$|RB|RBR|RBS|RP|VB|VBD|VBG|VBP|VBN|WDT|VBZ|WRB|WP$|WP|")
target.write('\n')
def ensure_unicode(v):
    # Decode a raw UTF-8 byte string to unicode, silently dropping any
    # bytes that are not valid UTF-8.
    return v.decode('utf-8', errors='ignore')
def punctAndWordsInSentence(listOfCharacters):
    """Count words, sentences, and punctuation usage in a book's text.

    Appends bar-delimited columns to the global `target` file: total
    word count, average sentence size (words per period), and the share
    of all punctuation accounted for by each mark, in the same order as
    the header row written at module level.

    listOfCharacters: the book's raw text (any character-indexable sequence).
    """
    # Column order must match the header string. The original iterated the
    # global `punct` *set*, which emits columns in arbitrary hash order and
    # mislabels every punctuation column; a fixed tuple pins the order.
    punct_order = ('!', '#', '"', '%', '$', '&', '(', '+', '*', '-',
                   ',', '/', '.', ';', ':', '=', '<', '?', '>', '@',
                   '[', ']', '_', '^', '`', '{', '~')
    punct_set = set(punct_order)
    letters = string.ascii_lowercase + string.ascii_uppercase
    punctuation_dict = {}
    totalWords = 0
    periodCounter = 0
    punctCounter = 0
    # Iterate characters, counting periods and punctuation frequencies.
    # Start at 1 because a word boundary needs the preceding character.
    for i in range(1, len(listOfCharacters)):
        ch = listOfCharacters[i]
        # A word ends when a letter is followed by a space or punctuation.
        if (ch == " " or str(ch) in punct_set) and str(listOfCharacters[i - 1]) in letters:
            totalWords = totalWords + 1
        # Periods approximate sentence count.
        if ch == ".":
            periodCounter = periodCounter + 1
        if ch in punct_set:
            punctCounter = punctCounter + 1
            punctuation_dict[ch] = punctuation_dict.get(ch, 0) + 1
    # Guard texts with no period: the original raised ZeroDivisionError here.
    avgSentenceSize = totalWords / periodCounter if periodCounter else 0
    # Put together output, bar delimited.
    target.write(str(totalWords) + "|")
    target.write(str(avgSentenceSize) + "|")
    for mark in punct_order:
        if mark in punctuation_dict:
            # pct of punct that is [mark]; punctCounter > 0 whenever the
            # dict is non-empty, so this division is safe
            target.write(str(punctuation_dict[mark] / punctCounter) + "|")
        else:
            target.write(str(0) + "|")  # 0 if unused
def pos_tagging(content):
    """Write the fraction of tokens belonging to each POS tag to `target`.

    Tokenizes `content`, POS-tags it with NLTK, and appends one
    bar-delimited column per tag in the fixed order of `parts`
    (matching the header row written at module level).
    """
    parts = ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD", "NN", "NNP", "NNPS", "NNS", "PDT", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "VB", "VBD", "VBG", "VBP", "VBN", "WDT", "VBZ", "WRB", "WP$", "WP" ]
    content = ensure_unicode(content)  # avoid str/unicode mix-ups inside nltk
    tokens = nltk.word_tokenize(content)  # need to tokenize first
    tagged = nltk.pos_tag(tokens)
    # Frequency of each POS tag; the token text itself is not needed.
    results_dict = {}
    for _, pos in tagged:
        results_dict[pos] = results_dict.get(pos, 0) + 1
    counter = len(tagged)
    # Write one column per tag in fixed order.
    for tag in parts:
        if tag in results_dict:
            # pct of tokens that carry this tag (counter > 0 here, since
            # results_dict is only non-empty when tokens exist)
            target.write(str(results_dict[tag] / float(counter)) + "|")
        else:
            target.write(str(0) + "|")  # 0 if unused
def decode_file(text):
    # Convert raw bytes to unicode, substituting the Unicode replacement
    # character for any invalid UTF-8 sequences.
    decoded = text.decode('utf-8', errors='replace')
    return decoded
def get_title_author(text):
    """Extract the title and author from a Gutenberg-style file header.

    Scans the first 80 lines of `text` for "Title: " and "Author: "
    markers and returns a (title, author) tuple; either entry is the
    string "NULL" if not found.
    """
    author = "NULL"
    title = "NULL"
    # Slicing to the first 80 lines handles shorter texts naturally,
    # replacing the original's bare try/except that swallowed IndexError
    # (and, silently, any other error).
    for line in text.splitlines()[:80]:
        if "Title: " in line:
            title = line[7:]   # len("Title: ") == 7
        if "Author: " in line:
            author = line[8:]  # len("Author: ") == 8
        # Stop scanning once both have been found.
        if title != "NULL" and author != "NULL":
            break
    return (title, author)
def get_sentiment(temp):
    """Compute mean VADER sentiment scores over all sentences of a book.

    Returns a numpy array of the mean [neg, neu, pos, compound] scores.
    Returns zeros when the text contains no sentences (the original
    averaged an empty array, yielding NaN with a runtime warning).
    """
    # Strip line breaks so sentence tokenization sees continuous prose.
    temp = temp.replace('\n', '')
    temp = temp.replace('\r', '')
    content = decode_file(temp)
    sentences = tokenize.sent_tokenize(content)
    sid = SentimentIntensityAnalyzer()
    booksent = []
    for sentence in sentences:
        ss = sid.polarity_scores(sentence)
        booksent.append([ss['neg'], ss['neu'], ss['pos'], ss['compound']])
    # Guard empty input before averaging.
    if not booksent:
        return np.zeros(4)
    valuearray = np.array(booksent)
    # Mean negative, neutral, positive, compound score over all sentences.
    return np.mean(valuearray, axis=0)
def preprocessing(data_dir="/Users/jamesledoux/Documents/Drew"):
    """Extract one bar-delimited feature row per book in `data_dir`.

    For each non-hidden file: word/sentence/punctuation counts, mean
    sentiment scores, a sequential ID, title/author, and POS-tag
    frequencies are appended as one line of the global `target` file.

    data_dir: directory of plain-text books; defaults to the path that
    was previously hard-coded, so existing callers are unaffected.
    """
    counter = 0
    for book in os.listdir(data_dir):
        if book.startswith('.'):  # skip hidden files such as .DS_STORE
            continue
        book = str(book)
        # `with` closes the file; the original's explicit f.close()
        # inside the with-block was redundant.
        with open(os.path.join(data_dir, book), 'rb') as f:
            content = f.read().rstrip('\n')
        target.write(book + "|")
        punctAndWordsInSentence(content)
        sentiment_values = get_sentiment(content)
        neg = sentiment_values[0]
        neu = sentiment_values[1]
        pos = sentiment_values[2]
        compound = sentiment_values[3]
        target.write(str(neg) + "|" + str(neu) + "|" + str(pos) + "|" + str(compound) + "|" + str(counter) + "|")
        title_author_tuple = get_title_author(content)
        target.write(str(title_author_tuple[0]) + "|" + str(title_author_tuple[1]) + "|")
        pos_tagging(content)
        target.write('\n')
        counter += 1
        # Progress report every 20 books; call syntax works on Py2 and Py3.
        if counter % 20 == 0:
            print("book " + str(counter) + " done: " + book)
# Run the full feature-extraction pipeline, then report wall-clock runtime.
preprocessing()
print datetime.now() - startTime
"""
POS Tagging Key:
$: dollar
$ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$
'': closing quotation mark
' ''
(: opening parenthesis
( [ {
): closing parenthesis
) ] }
,: comma
,
--: dash
--
.: sentence terminator
. ! ?
:: colon or ellipsis
: ; ...
CC: conjunction, coordinating
& 'n and both but either et for less minus neither nor or plus so
therefore times v. versus vs. whether yet
CD: numeral, cardinal
mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-
seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025
fifteen 271,124 dozen quintillion DM2,000 ...
DT: determiner
all an another any both del each either every half la many much nary
neither no some such that the them these this those
EX: existential there
there
FW: foreign word
gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous
lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte
terram fiche oui corporis ...
IN: preposition or conjunction, subordinating
astride among uppon whether out inside pro despite on by throughout
below within for towards near behind atop around if like until below
next into if beside ...
JJ: adjective or numeral, ordinal
third ill-mannered pre-war regrettable oiled calamitous first separable
ectoplasmic battery-powered participatory fourth still-to-be-named
multilingual multi-disciplinary ...
JJR: adjective, comparative
bleaker braver breezier briefer brighter brisker broader bumper busier
calmer cheaper choosier cleaner clearer closer colder commoner costlier
cozier creamier crunchier cuter ...
JJS: adjective, superlative
calmest cheapest choicest classiest cleanest clearest closest commonest
corniest costliest crassest creepiest crudest cutest darkest deadliest
dearest deepest densest dinkiest ...
LS: list item marker
A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005
SP-44007 Second Third Three Two * a b c d first five four one six three
two
MD: modal auxiliary
can cannot could couldn't dare may might must need ought shall should
shouldn't will would
NN: noun, common, singular or mass
common-carrier cabbage knuckle-duster Casino afghan shed thermostat
investment slide humour falloff slick wind hyena override subhumanity
machinist ...
NNP: noun, proper, singular
Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos
Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA
Shannon A.K.C. Meltex Liverpool ...
NNPS: noun, proper, plural
Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists
Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques
Apache Apaches Apocrypha ...
NNS: noun, common, plural
undergraduates scotches bric-a-brac products bodyguards facets coasts
divestitures storehouses designs clubs fragrances averages
subjectivists apprehensions muses factory-jobs ...
PDT: pre-determiner
all both half many quite such sure this
POS: genitive marker
' 's
PRP: pronoun, personal
hers herself him himself hisself it itself me myself one oneself ours
ourselves ownself self she thee theirs them themselves they thou thy us
PRP$: pronoun, possessive
her his mine my our ours their thy your
RB: adverb
occasionally unabatingly maddeningly adventurously professedly
stirringly prominently technologically magisterially predominately
swiftly fiscally pitilessly ...
RBR: adverb, comparative
further gloomier grander graver greater grimmer harder harsher
healthier heavier higher however larger later leaner lengthier less-
perfectly lesser lonelier longer louder lower more ...
RBS: adverb, superlative
best biggest bluntest earliest farthest first furthest hardest
heartiest highest largest least less most nearest second tightest worst
RP: particle
aboard about across along apart around aside at away back before behind
by crop down ever fast for forth from go high i.e. in into just later
low more off on open out over per pie raising start teeth that through
under unto up up-pp upon whole with you
SYM: symbol
% & ' '' ''. ) ). * + ,. < = > @ A[fj] U.S U.S.S.R * ** ***
TO: "to" as preposition or infinitive marker
to
UH: interjection
Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen
huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly
man baby diddle hush sonuvabitch ...
VB: verb, base form
ask assemble assess assign assume atone attention avoid bake balkanize
bank begin behold believe bend benefit bevel beware bless boil bomb
boost brace break bring broil brush build ...
VBD: verb, past tense
dipped pleaded swiped regummed soaked tidied convened halted registered
cushioned exacted snubbed strode aimed adopted belied figgered
speculated wore appreciated contemplated ...
VBG: verb, present participle or gerund
telegraphing stirring focusing angering judging stalling lactating
hankerin' alleging veering capping approaching traveling besieging
encrypting interrupting erasing wincing ...
VBN: verb, past participle
multihulled dilapidated aerosolized chaired languished panelized used
experimented flourished imitated reunifed factored condensed sheared
unsettled primed dubbed desired ...
VBP: verb, present tense, not 3rd person singular
predominate wrap resort sue twist spill cure lengthen brush terminate
appear tend stray glisten obtain comprise detest tease attract
emphasize mold postpone sever return wag ...
VBZ: verb, present tense, 3rd person singular
bases reconstructs marks mixes displeases seals carps weaves snatches
slumps stretches authorizes smolders pictures emerges stockpiles
seduces fizzes uses bolsters slaps speaks pleads ...
WDT: WH-determiner
that what whatever which whichever
WP: WH-pronoun
that what whatever whatsoever which who whom whosoever
WP$: WH-pronoun, possessive
whose
WRB: Wh-adverb
how however whence whenever where whereby whereever wherein whereof why
``: opening quotation mark
` ``
"""
#