-
Notifications
You must be signed in to change notification settings - Fork 2
/
filter.py
200 lines (159 loc) · 7.51 KB
/
filter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
import logging
from string import Template
from typing import List
from council.contexts import AgentContext, ChatMessage, ChatMessageKind, LLMContext, ScoredChatMessage
from council.filters import FilterBase
from council.llm import LLMBase, LLMMessage
from controller import AppState
logger = logging.getLogger("council")
class WritingAssistantFilter(FilterBase):
    """Aggregation filter for the writing-assistant agent.

    After each iteration it collects the evaluator's scored results,
    merges any competing outlines/articles into the shared ``AppState``
    via LLM calls, and then asks the LLM whether the article needs more
    editing or can be returned to the user.
    """

    # --- Prompt templates (runtime strings; do not reformat) ---

    _OUTLINE_SYSTEM = "You are an expert-level AI writing editor. Your role is to aggregate multiple suggestions for an article outline into a single one."
    _OUTLINE_TEMPLATE = Template("""
# Task Description
Your task is to combine one or more article outlines into a single one written in markdown format.
# Instructions
Read the CHAT HISTORY, EXISTING OUTLINE, and POSSIBLE OUTLINES. Then respond with a single article outline that best combines the POSSIBLE OUTLINES.
## CONVERSATION HISTORY
$conversation_history
## EXISTING OUTLINE
$existing_outline
## POSSIBLE OUTLINES
$possible_outlines
## OUTLINE
```markdown
""")

    _ARTICLE_SYSTEM = "You are an expert-level AI writing editor. Your role is to aggregate multiple partial articles into a single, complete article."
    _ARTICLE_TEMPLATE = Template("""
# Task Description
Your task is to combine one or more partial articles into a single one written in markdown format.
# Instructions
Read the CHAT HISTORY, ARTICLE OUTLINE, EXISTING ARTICLE, and PARTIAL ARTICLES.
Then respond with a single article that best combines and expands the PARTIAL ARTICLES.
The resulting ARTICLE should include all sections and subsections in the ARTICLE OUTLINE.
## CONVERSATION HISTORY
$conversation_history
## ARTICLE OUTLINE
$article_outline
## EXISTING ARTICLE
$existing_article
## PARTIAL ARTICLES
$partial_articles
## ARTICLE
```markdown
""")

    _DECISION_SYSTEM = "You are an expert-level AI writing editor. Your role is to decide whether to keep editing the ARTICLE."
    _DECISION_TEMPLATE = Template("""
# Task Description
Your task is to decide whether:
1. To keep editing the ARTICLE, or
2. To return the article to the requesting agent.
You will use a CHECK LIST to determine whether to KEEP EDITING.
# Instructions
Consider every item in the CHECK LIST.
If any item is true, KEEP EDITING.
You must be careful and accurate when completing the CHECK LIST.
# CHECK LIST
- If the ARTICLE still has placeholders or empty sections, KEEP EDITING.
- If the ARTICLE is incoherent, KEEP EDITING.
- If there are ARTICLE subsections with fewer than three paragraphs, KEEP EDITING.
- If the ARTICLE does not include everything being requested in the CHAT HISTORY, KEEP EDITING.
- If the ARTICLE does not include every section and subsection in ARTICLE OUTLINE, KEEP EDITING.
- WORD COUNT: What is the ARTICLE's word count?
- If the WORD COUNT is less than 1500 words, KEEP EDITING.
- SECTIONS and SUBSECTIONS: Does the ARTICLE contain every section and subsection in the ARTICLE OUTLINE?
- If the ARTICLE is missing SECTIONS or SUBSECTIONS from the ARTICLE OUTLINE, KEEP EDITING.
- If the ARTICLE has any sections or subsections with fewer than three detailed paragraphs, KEEP EDITING.
## ARTICLE OUTLINE
$outline
## ARTICLE
<article>
$article
</article>
## CONVERSATION HISTORY
$conversation_history
# Your Response (a list of all CHECK LIST results followed by exactly one of ["KEEP EDITING", "RETURN TO REQUESTING AGENT"])
""")

    def __init__(self, llm: LLMBase, state: AppState):
        super().__init__()
        self.state = state
        # Monitored wrapper; the raw LLM is reached via self._llm.inner.
        self._llm = self.new_monitor("llm", llm)

    def _execute(self, context: AgentContext) -> List[ScoredChatMessage]:
        """
        Aggregation phase.
        Get latest iteration results from Evaluator and aggregate if applicable.

        Returns an empty list to keep iterating, or a single scored agent
        message containing the finished article.
        """
        current_iteration_results = self._current_iteration_messages(context)
        conversation_history = [f"{m.kind}: {m.message}" for m in context.chat_history.messages]

        # If multiple outlines or articles were generated in the last
        # iteration, use LLM calls to aggregate them.
        outlines, articles = self._collect_skill_outputs(current_iteration_results)

        if outlines:
            self.state.outline = self._aggregate_outlines(context, conversation_history, outlines)
        if articles:
            self.state.article = self._aggregate_articles(context, conversation_history, articles)

        # Decide whether to keep iterating or to return the article
        # to the user for review.
        response = self._editing_decision(context, conversation_history)

        logger.debug("outline: %s", self.state.outline)
        logger.debug("article: %s", self.state.article)
        logger.debug("controller editing decision: %s", response)

        if "KEEP EDITING" in response:
            return []
        return [ScoredChatMessage(ChatMessage(message=self.state.article, kind=ChatMessageKind.Agent), 1.0)]

    def _current_iteration_messages(self, context: AgentContext) -> List[ChatMessage]:
        """Return evaluator messages from the current iteration, best score first."""
        ranked = sorted(context.evaluation, key=lambda x: x.score, reverse=True)
        return [
            scored.message
            for scored in ranked
            if scored.message.data['iteration'] == self.state.iteration
        ]

    @staticmethod
    def _collect_skill_outputs(messages: List[ChatMessage]):
        """Split skill results into (outlines, articles) by message source."""
        outlines: List[str] = []
        articles: List[str] = []
        for message in messages:
            source = message.source
            if source == "SectionWriterSkill":
                articles.append(message.data['article'])
            elif source == "OutlineWriterSkill":
                outlines.append(message.data['outline'])
        return outlines, articles

    def _post(self, context: AgentContext, system_prompt: str, user_prompt: str) -> str:
        """Send one system+user exchange to the monitored LLM; return the first choice."""
        messages = [
            LLMMessage.system_message(system_prompt),
            LLMMessage.user_message(user_prompt),
        ]
        llm_result = self._llm.inner.post_chat_request(
            context=LLMContext.from_context(context, self._llm),
            messages=messages,
        )
        return llm_result.first_choice

    def _aggregate_outlines(self, context: AgentContext, conversation_history: List[str], outlines: List[str]) -> str:
        """Outline aggregation: merge candidate outlines into a single markdown outline."""
        prompt = self._OUTLINE_TEMPLATE.substitute(
            conversation_history=conversation_history,
            existing_outline=self.state.outline,
            possible_outlines=outlines,
        )
        return self._post(context, self._OUTLINE_SYSTEM, prompt)

    def _aggregate_articles(self, context: AgentContext, conversation_history: List[str], articles: List[str]) -> str:
        """Article aggregation: merge partial articles into a single markdown article."""
        prompt = self._ARTICLE_TEMPLATE.substitute(
            conversation_history=conversation_history,
            article_outline=self.state.outline,
            existing_article=self.state.article,
            partial_articles=articles,
        )
        return self._post(context, self._ARTICLE_SYSTEM, prompt)

    def _editing_decision(self, context: AgentContext, conversation_history: List[str]) -> str:
        """Ask the LLM whether to keep editing or return the article."""
        prompt = self._DECISION_TEMPLATE.substitute(
            article=self.state.article,
            outline=self.state.outline,
            conversation_history=conversation_history,
        )
        return self._post(context, self._DECISION_SYSTEM, prompt)