"""
python confidence_estimation.py --dataset=wikibio --backbone=chatgpt
"""
import json
import os

import numpy as np
from absl import app, flags
from tqdm.auto import tqdm as auto_tqdm

from src import prompts
from src.common import chatgpt_query, llama_query
from src.data_utils import (
    load_factcheckgpt_processed_human,
    load_felm_science_processed_human,
    load_wikibio_processed_human,
)

FLAGS = flags.FLAGS


def set_eval_args():
    flags.DEFINE_enum(
        "dataset",
        None,
        ["wikibio", "felm", "factcheckgpt"],
        help="Name of the evaluation dataset.",
    )
    flags.DEFINE_enum(
        "backbone",
        None,
        ["chatgpt", "llama"],
        help="Name of the model used to generate belief trees.",
    )


def load_all_statements(statement_to_prem):
    """Collect every unique statement to score: belief-tree roots, their
    generated premises, and the segmented responses of the dataset."""
    all_statements = []
    for log_dict in statement_to_prem:
        statement = log_dict["statement"]
        if statement not in all_statements:
            all_statements.append(statement)
        for childnode in log_dict["child_nodes"]:
            # Perturbation-based premises are excluded from confidence estimation.
            if childnode["prem_gen_method"] == "perturbation":
                continue
            for prem in childnode["child_nodes"]:
                if prem not in all_statements:
                    all_statements.append(prem)
    if FLAGS.dataset == "felm":
        data_list = load_felm_science_processed_human()
    elif FLAGS.dataset == "wikibio":
        data_list = load_wikibio_processed_human()
    elif FLAGS.dataset == "factcheckgpt":
        data_list = load_factcheckgpt_processed_human()
    else:
        raise NotImplementedError
    for log_dict in data_list:
        for segment in log_dict["segmented_response"]:
            if segment not in all_statements:
                all_statements.append(segment)
    return all_statements


def main(argv):
    del argv  # Unused by absl.
    datapath = f"logs/premise_gen/recycle/{FLAGS.dataset}_{FLAGS.backbone}.json"
    if FLAGS.backbone == "chatgpt":
        query_fn = chatgpt_query
        confidence_gen_prompt = prompts.chatgpt_confidence_prob
    elif FLAGS.backbone == "llama":
        query_fn = llama_query
        confidence_gen_prompt = prompts.llama_confidence_prob
    else:
        raise NotImplementedError
    with open(datapath, "r", encoding="utf-8") as f:
        statement_to_prem = json.load(f)
    all_statements = load_all_statements(statement_to_prem)
    conf_estimation_logs = []
    prog_bar = auto_tqdm(total=len(all_statements))
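
    # Query the backbone once per statement. The prompt format differs by
    # backbone: a single user turn for ChatGPT, a short scripted dialogue
    # for Llama.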
    for data_example in all_statements:
        if FLAGS.backbone == "chatgpt":
            user_prompt = confidence_gen_prompt + " " + data_example.strip()
            response = query_fn(
                messages=[{"role": "user", "content": user_prompt}],
                temperature=0.0,
                max_tokens=5,
                n=1,
                logprobs=True,
                top_logprobs=5,
            )
        elif FLAGS.backbone == "llama":
            messages = [
                {"role": "user", "content": confidence_gen_prompt},
                {
                    "role": "assistant",
                    "content": "Yes, I understand the task. You can provide the statement"
                    " you'd like me to guess the truthfulness.",
                },
                {"role": "user", "content": "Statement: " + data_example.strip()},
            ]
            response = query_fn(
                messages=messages, temperature=0.0, max_tokens=5, n=1, logprobs=True, top_logprobs=5
            )
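
        # Map the True/False judgment to a probability. If the needed tokens
        # are missing from the top-5 logprobs (or the query failed), fall back
        # to an uninformative 0.5.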
        if FLAGS.backbone == "chatgpt":
            if response is None or response.choices[0].message.content is None:
                prob = 0.5
                raw_outputs = ""
            else:
                raw_outputs = response.choices[0].message.content
                first_logprobs = response.choices[0].logprobs.content[0].top_logprobs
                true_logprob = None
                false_logprob = None
                for logprob in first_logprobs:
                    token = logprob.token
                    if "true" in token.lower() and true_logprob is None:
                        true_logprob = logprob.logprob
                    elif "false" in token.lower() and false_logprob is None:
                        false_logprob = logprob.logprob
                if true_logprob is None or false_logprob is None:
                    prob = 0.5
                else:
                    # Two-way softmax over the "true"/"false" logprobs; prob is P(True).
                    cls_array = np.array([true_logprob, false_logprob])
                    cls_array = np.exp(cls_array) / np.sum(np.exp(cls_array))
                    prob = cls_array.tolist()[0]
        else:
            if response is None or response.choices[0].message.content is None:
                prob = 0.5
                raw_outputs = ""
            else:
                raw_outputs = response.choices[0].message.content
                # Llama exposes a token -> logprob mapping per position; sum the
                # probabilities of the " True"/"True" spellings, with missing
                # tokens defaulting to a negligible logprob of -20.
                logprob_dict = response.choices[0].logprobs.top_logprobs[0]
                true_prob = np.exp(logprob_dict.get(" True", -20)) + np.exp(
                    logprob_dict.get("True", -20)
                )
                false_prob = np.exp(logprob_dict.get(" False", -20)) + np.exp(
                    logprob_dict.get("False", -20)
                )
                # Normalize so prob is P(True).
                cls_array = np.array([true_prob, false_prob])
                cls_array = cls_array / np.sum(cls_array)
                prob = cls_array.tolist()[0]
        conf_estimation_logs.append(
            {"statement": data_example, "probability": prob, "raw_output": raw_outputs}
        )
        prog_bar.update(1)

    savename = f"logs/conf_estimation/{FLAGS.dataset}_{FLAGS.backbone}.json"
    os.makedirs(os.path.dirname(savename), exist_ok=True)
    with open(savename, "w", encoding="utf-8") as f:
        json.dump(conf_estimation_logs, f, indent=4)


if __name__ == "__main__":
    # Flags must be registered before absl parses the command line.
    set_eval_args()
    app.run(main)
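
# A saved entry looks like (values illustrative):
#     {"statement": "...", "probability": 0.87, "raw_output": "True"}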