sample.py
from __future__ import division
import onmt
import torch
import argparse
import math
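
# Sample sequences from a trained model via onmt.Translator and write one
# decoded sequence per line to the output file.
#
# Example invocation (the model path shown is illustrative, not part of the script):
#   python sample.py -model model.pt -output pred.txt -num_pts 10 -gpu 0
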
parser = argparse.ArgumentParser(description='sample.py')
parser.add_argument('-model', required=True,
                    help='Path to model .pt file')
parser.add_argument('-output', default='pred.txt',
                    help="""Path to output the predictions (each line will
                    be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5,
                    help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
                    help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=100,
                    help='Maximum sentence length.')
parser.add_argument('-num_pts', type=int, default=10,
                    help='Number of output points of interpolation.')
parser.add_argument('-replace_unk', action="store_true",
                    help="""Replace the generated UNK tokens with the source
                    token that had the highest attention weight. If phrase_table
                    is provided, it will look up the identified source token and
                    give the corresponding target token. If it is not provided
                    (or the identified source token does not exist in the
                    table) then it will copy the source token""")
# parser.add_argument('-phrase_table',
#                     help="""Path to source-target dictionary to replace UNK
#                     tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
                    help='Print scores and predictions for each sentence')
parser.add_argument('-n_best', type=int, default=1,
                    help="""If verbose is set, will output the n_best
                    decoded sentences""")
parser.add_argument('-gpu', type=int, default=-1,
                    help="Device to run on")
def reportScore(name, scoreTotal, wordsTotal):
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
        name, scoreTotal / wordsTotal,
        name, math.exp(-scoreTotal / wordsTotal)))
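

# Yield each line of f, followed by a trailing None as an end-of-input sentinel
# (defined here but not called in main() below).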
def addone(f):
    for line in f:
        yield line
    yield None


def main():
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    translator = onmt.Translator(opt)
    outF = open(opt.output, 'w')
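
    # Score/word accumulators; only predWordsTotal is updated below, and the
    # gold-side bookkeeping is left commented out.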
    predScoreTotal, predWordsTotal, goldScoreTotal, goldWordsTotal = 0, 0, 0, 0
    count = 0

    predBatch, predScore = translator.sample(opt.num_pts)
    # predScoreTotal += sum(score[0] for score in predScore)
    predWordsTotal += sum(len(x[0]) for x in predBatch)
    # if tgtF is not None:
    #     goldScoreTotal += sum(goldScore)
    #     goldWordsTotal += sum(len(x) for x in tgtBatch)
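
    # Write the top hypothesis of each sampled sequence to the output file,
    # one per line; optionally print scores when -verbose is set.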
    for b in range(len(predBatch)):
        count += 1
        outF.write(" ".join(predBatch[b][0]) + '\n')
        outF.flush()

        if opt.verbose:
            # srcSent = ' '.join(srcBatch[b])
            # if translator.tgt_dict.lower:
            #     srcSent = srcSent.lower()
            # print('SENT %d: %s' % (count, srcSent))
            print('PRED %d: %s' % (count, " ".join(predBatch[b][0])))
            print("PRED SCORE: %.4f" % predScore[b][0])

            if opt.n_best > 1:
                print('\nBEST HYP:')
                for n in range(opt.n_best):
                    print("[%.4f] %s" % (predScore[b][n], " ".join(predBatch[b][n])))

            print('')

    outF.close()


if __name__ == "__main__":
    main()