#!/usr/bin/python
# Main program: builds a bag-of-words model over the input files,
# extracts a summary via extract.sumarize, then reorders the chosen
# sentences using chronology and local-context similarity experts.
import nltk.data
from nltk.corpus import stopwords
import re
from nltk.stem.wordnet import WordNetLemmatizer
from classes import *
import sys, getopt
from scipy.cluster import hierarchy
import numpy
import extract as ext
from nltk.tag.simplify import simplify_wsj_tag
from nltk.corpus import wordnet as wn
# from scipy.cluster.vq import kmeans2  (unused)
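# Assumptions: the local classes module (not shown here) provides
# sentenceRepresentation and a Vector type with a cosine() method, and
# nltk.tag.simplify.simplify_wsj_tag implies an NLTK 2.x installation.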
def usage():
    '''Print the command line usage of the program.'''
    print "Usage: " + sys.argv[0] + " [OPTIONS] FILE..."
    print "See " + sys.argv[0] + " -h for more details"
# TODO(cliveverghese@gmail.com): Remove this function from this file and separate it into a module.
def removeStopwords(sentence):
    '''Tokenize and POS-tag a sentence, then lemmatize it, keeping only
    nouns, proper nouns, numbers and verb forms. (An earlier version
    filtered stopwords.words('english') instead, hence the name.)'''
    temp = nltk.word_tokenize(sentence)
    temp = nltk.pos_tag(temp)
    lemmatizer = WordNetLemmatizer()
    temp = [(word, simplify_wsj_tag(tag)) for word, tag in temp]
    # The simplified tag's first letter ('n' or 'v') doubles as the
    # WordNet part of speech expected by the lemmatizer.
    sen = [lemmatizer.lemmatize(word.lower(), tag[0].lower())
           for word, tag in temp
           if tag in ['N', 'NP', 'NUM', 'V', 'VD', 'VG', 'VN']]
    return sen
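# A minimal illustration (exact output depends on the tokenizer and
# tagger models): removeStopwords("The cats were sleeping") should
# yield roughly ['cat', 'be', 'sleep'].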
def vectorise(sent, bag_of_words):
    '''Return the term-frequency vector of sent over bag_of_words.
    >>> vectorise(['cat', 'sat', 'cat'], ['cat', 'mat', 'sat'])
    [2, 0, 1]
    '''
    v = [0 for x in range(len(bag_of_words))]
    for word in sent:
        if word in bag_of_words:
            v[bag_of_words.index(word)] += 1
    return v
# TODO(cliveverghese@gmail.com): Add more command line options
args = sys.argv[1:]
try:
    # getopt returns (parsed_options, remaining_arguments); the
    # remaining arguments are the files to summarize.
    opts, files = getopt.getopt(args, "h")
except getopt.GetoptError:
    usage()
    sys.exit(1)
for o, a in opts:
    if o == "-h":
        usage()
        sys.exit(0)
if len(files) == 0:
    usage()
    sys.exit(1)
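# Example invocation (hypothetical file names):
#   python final2.py chapter1.txt chapter2.txt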
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentence = []            # sentenceRepresentation objects for every kept sentence
document_vector = []     # term-frequency vector per kept sentence
doc_vec = []             # per-file list of the kept sentences' token lists
file_names = {}          # file name -> index into doc_vec
j = 0                    # index of the file currently being read
total_sentences = 0
for tempfile in files:
    fp = open(tempfile)
    file_names[tempfile] = j
    datas = fp.read()
    i = 0           # sentence index within this file
    paranum = 0     # paragraph (line) index within this file
    tl = []
    for data in datas.splitlines():
        data = data.replace('\n', '')
        data = data.replace('\t', '')
        data = data.replace('\r', '')
        sys.stdout.write("\rProcessing para " + str(paranum))
        if len(data) <= 3:      # skip blank or near-empty lines
            continue
        data = tokenizer.tokenize(data)
        for sen in data:
            bog = removeStopwords(sen)
            if len(bog) < 2:    # too few content words to be useful
                continue
            tl.append(bog)
            sentence.append(sentenceRepresentation(bog, 0, sen, tempfile, i, paranum))
            i = i + 1
        paranum += 1
    fp.close()
    doc_vec.append(tl)
    total_sentences += i
    j += 1
bag_of_words = []
index_bag_of_words = {}
i = 0
processed_sentences = 1
for sen in sentence:
    print "\rProcessing Sentence " + str(processed_sentences) + " of " + str(len(sentence)),
    processed_sentences += 1
    for word in sen.sentence:
        if word not in index_bag_of_words:
            # Merge synonyms: if the word's first WordNet synset is
            # identical (path similarity 1) to an existing entry, map
            # it onto that entry instead of adding a new dimension.
            done = False
            for al in bag_of_words:
                try:
                    res = wn.synsets(al)[0].path_similarity(wn.synsets(word)[0])
                    if res == 1:
                        index_bag_of_words[word] = bag_of_words.index(al)
                        done = True
                        break
                except Exception:
                    # No synsets for one of the words; treat as distinct.
                    continue
            if not done:
                index_bag_of_words[word] = i
                i += 1
                bag_of_words.append(word)
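# path_similarity is 1.0 exactly when the two synsets coincide, e.g.
# wn.synsets('car')[0] and wn.synsets('automobile')[0] are both
# Synset('car.n.01'), so 'automobile' would share the 'car' dimension.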
global_vector = [0 for x in range(len(bag_of_words))]   # corpus-wide term frequencies
print "Size of bag of words = " + str(len(bag_of_words))
for sen in sentence:
    v = [0 for x in range(len(bag_of_words))]
    for word in sen.sentence:
        v[index_bag_of_words[word]] += 1
        global_vector[index_bag_of_words[word]] += 1
    sen.words = Vector(v)
    document_vector.append(v)
print "How many sentences : "
n = int(raw_input())
sentence = ext.sumarize(document_vector,sentence,n,bag_of_words)
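# extract.sumarize (local module, spelling as defined there) is presumed
# to return the n top-scoring sentences; the weight attribute printed
# below is assumed to be set by it.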
print "\rSummary Of the given text"
"""i = max(global_vector.data)
printed = 0
while printed < 3:
for t in range(len(global_vector.data)):
if global_vector[t] == i:
print bag_of_words[t] + " ",
printed += 1
i -= 1
"""
print "\n"
for sen in sentence:
print sen.original + "(" + sen.original_file + "," + str(sen.file_position) +"," + str(sen.length) + "," + str(sen.weight) + ")"
# Order the chosen sentences by their position in the source file.
sentence = sorted(sentence, key=lambda x: x.file_position)
param = 4   # size of the context window, in sentences
print "\n"
# Preceding-context similarity: for each of up to param sentences before
# this one, take the cosine similarity with this sentence and keep the
# maximum (mirrors the following-context pass below).
for sen in sentence:
    tlist = []
    j = max(sen.file_position - param, 0)
    for i in range(j, sen.file_position):
        v = vectorise(doc_vec[file_names[sen.original_file]][i], bag_of_words)
        val = Vector(v).cosine(sen.words)
        tlist.append(val)
    if len(tlist) > 0:
        sen.prec = max(tlist)
    else:
        sen.prec = 0
# Following-context similarity, symmetric to the pass above.
for sen in sentence:
    tlist = []
    j = min(sen.file_position + param, len(doc_vec[file_names[sen.original_file]]))
    for i in range(sen.file_position + 1, j):
        v = vectorise(doc_vec[file_names[sen.original_file]][i], bag_of_words)
        val = Vector(v).cosine(sen.words)
        tlist.append(val)
    if len(tlist) > 0:
        sen.succ = max(tlist)
    else:
        sen.succ = 0
def chroexp(sen1, sen2):
    '''Chronology expert: 1 if sen1 occurs after sen2 in its file, 0.5 on a tie.'''
    if sen1.file_position > sen2.file_position:
        return 1
    if sen1.file_position == sen2.file_position:
        return 0.5
    return 0
def precexp(sen1, sen2):
    '''Preceding-context expert: compares similarity to the preceding window.'''
    if sen1.prec > sen2.prec:
        return 1
    if sen1.prec == sen2.prec:
        return 0.5
    return 0
def succexp(sen1, sen2):
    '''Following-context expert: compares similarity to the following window.'''
    if sen1.succ > sen2.succ:
        return 1
    if sen1.succ == sen2.succ:
        return 0.5
    return 0
def piorder1(sen1, sen2):
    '''Weighted combination of the three ordering experts.'''
    return 0.5 * chroexp(sen1, sen2) + 0.3 * precexp(sen1, sen2) + 0.2 * succexp(sen1, sen2)
def piorder2(sen1, sen2):
    '''Equal-weight variant of piorder1 with the arguments swapped (currently unused).'''
    return 0.334 * chroexp(sen2, sen1) + 0.333 * precexp(sen2, sen1) + 0.333 * succexp(sen2, sen1)
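# Worked example: with sen1 earlier in its file (file_position 0 vs 2)
# and equal prec/succ scores, piorder1(sen1, sen2) = 0.5*0 + 0.3*0.5 +
# 0.2*0.5 = 0.25 while piorder1(sen2, sen1) = 0.5*1 + 0.3*0.5 + 0.2*0.5
# = 0.75, so sen1 ends up with the lower pi and is emitted first.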
# Net preference score: sentences the experts prefer to place after the
# others get a high pi, so sorting ascending puts the best opening
# sentence first.
for sen1 in sentence:
    sig1 = 0
    sig2 = 0
    for sen2 in sentence:
        sig1 = sig1 + piorder1(sen1, sen2)
        sig2 = sig2 + piorder1(sen2, sen1)
    sen1.pi = sig1 - sig2
sentence = sorted(sentence, key=lambda x: x.pi)
ordered = []
# Greedily emit the lowest-pi sentence, then update the remaining scores
# so later choices no longer count the sentence just removed.
while len(sentence) > 0:
    t = sentence[0]
    sentence.remove(t)
    ordered.append(t)
    for sen in sentence:
        sen.pi = sen.pi + piorder1(t, sen) - piorder1(sen, t)
    sentence = sorted(sentence, key=lambda x: x.pi)
print "\n\nAfter Ordering\n"
for sen in ordered:
print sen.original + "("+str(sen.file_position)+")" + "("+str(sen.para_num)+")" + "(" + sen.original_file + ")"
# TODO(balan1.618@gmail.com): Add the sentence reordering
# TODO: Document all functions used within our code, including the ones that we created.