-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathgenerate_questions_t5.py
52 lines (38 loc) · 1.64 KB
/
generate_questions_t5.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# -*- coding: utf-8 -*-
"""Generate_Questions_T5.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/18vkf9bXtpQCMDWWyOew1KCfUW_m9ZQtn
"""
from transformers import T5ForConditionalGeneration,T5Tokenizer
#T5 model size on disk ~ 900 MB
def question_model_tokenizer():
    """Load the SQuAD-v1 question-generation T5 checkpoint.

    Downloads (on first call) and returns the pretrained model and its
    matching tokenizer from the 'ramsrigouthamg/t5_squad_v1' hub repo.
    Note: the model weights are roughly 900 MB on disk.

    Returns
    -------
    tuple
        (T5ForConditionalGeneration, T5Tokenizer) ready for generation.
    """
    checkpoint = 'ramsrigouthamg/t5_squad_v1'
    model = T5ForConditionalGeneration.from_pretrained(checkpoint)
    tokenizer = T5Tokenizer.from_pretrained(checkpoint)
    return model, tokenizer
import requests
import json
import re
import random
from pywsd.similarity import max_similarity
from pywsd.lesk import adapted_lesk
from pywsd.lesk import simple_lesk
from pywsd.lesk import cosine_lesk
from nltk.corpus import wordnet as wn
def get_question(sentence, answer, mdl, tknizer):
    """Generate a question whose answer is *answer* within *sentence*.

    Parameters
    ----------
    sentence : str
        Context passage that contains the answer span.
    answer : str
        Answer text the generated question should target.
    mdl : T5ForConditionalGeneration
        Question-generation model (trained to read the
        "context: ... answer: ..." prompt format, e.g. t5_squad_v1).
    tknizer : T5Tokenizer
        Tokenizer matching *mdl*.

    Returns
    -------
    str
        The generated question with the leading "question:" tag removed
        and surrounding whitespace stripped.
    """
    # Prompt format the t5_squad_v1 checkpoint was fine-tuned on.
    text = "context: {} answer: {}".format(sentence, answer)
    max_len = 256
    # `padding=False` replaces the deprecated (and now removed)
    # `pad_to_max_length=False` — same behavior: no padding, truncate
    # the prompt to max_len tokens.
    encoding = tknizer.encode_plus(text,
                                   max_length=max_len,
                                   padding=False,
                                   truncation=True,
                                   return_tensors="pt")
    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
    # Beam search with light n-gram blocking; single best sequence kept.
    outs = mdl.generate(input_ids=input_ids,
                        attention_mask=attention_mask,
                        early_stopping=True,
                        num_beams=5,
                        num_return_sequences=1,
                        no_repeat_ngram_size=2,
                        max_length=300)
    dec = [tknizer.decode(ids, skip_special_tokens=True) for ids in outs]
    # The model emits "question: <text>"; drop the tag.
    question = dec[0].replace("question:", "")
    return question.strip()