
Commit

Add Converse and ShowConversation components (#221)
- Move conversational components (`UserMessage` et al.) to a separate file.
- Add `isAppendOnlyRender` to `ComponentContext` so that components can optimize their output when rendered as part of an append-only stream.
- Add `renderToConversation`, `Converse`, and `ShowConversation` helpers/components to facilitate conversational UI.

The `UseTools` example demonstrates using `ShowConversation` to manage the presentation.
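
For reference, a minimal sketch of the new presentation hook (the `present` prop name and import paths are assumptions based on this commit's description; `message.type`/`message.element` match the shapes used in the diff below):

```tsx
/** @jsxImportSource ai-jsx */
import { ChatCompletion, UserMessage } from 'ai-jsx/core/completion';
import { ShowConversation } from 'ai-jsx/core/conversation';

// Prefix each conversational message with its role when presenting.
function App() {
  return (
    <ShowConversation present={(message) => [`${message.type}: `, message.element, '\n']}>
      <ChatCompletion>
        <UserMessage>I'd like to cancel my account.</UserMessage>
      </ChatCompletion>
    </ShowConversation>
  );
}
```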
petersalas authored Jul 31, 2023
1 parent 817cd4a commit 203574a
Showing 14 changed files with 497 additions and 328 deletions.
11 changes: 10 additions & 1 deletion packages/ai-jsx/package.json
@@ -4,7 +4,7 @@
"repository": "fixie-ai/ai-jsx",
"bugs": "https://github.com/fixie-ai/ai-jsx/issues",
"homepage": "https://ai-jsx.com",
"version": "0.7.1",
"version": "0.7.2",
"volta": {
"extends": "../../package.json"
},
@@ -81,6 +81,15 @@
"default": "./dist/cjs/core/completion.cjs"
}
},
"./core/conversation": {
"import": {
"types": "./dist/esm/core/conversation.d.ts",
"default": "./dist/esm/core/conversation.js"
},
"require": {
"default": "./dist/cjs/core/conversation.cjs"
}
},
"./core/image-gen": {
"import": {
"types": "./dist/esm/core/image-gen.d.ts",
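With the subpath export added above, the conversation helpers can be imported directly; a sketch (the exported names are taken from this commit's description):

```tsx
// Resolves through the "./core/conversation" entry added above:
// ESM loads dist/esm/core/conversation.js, CJS loads dist/cjs/core/conversation.cjs.
import { Converse, ShowConversation, renderToConversation } from 'ai-jsx/core/conversation';
```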
139 changes: 52 additions & 87 deletions packages/ai-jsx/src/batteries/use-tools.tsx
@@ -5,26 +5,17 @@
*/

import {
AssistantMessage,
ChatCompletion,
FunctionCall,
FunctionParameters,
FunctionResponse,
SystemMessage,
UserMessage,
} from '../core/completion.js';
import {
Node,
RenderContext,
isElement,
Element,
AppendOnlyStream,
ComponentContext,
RenderableStream,
} from '../index.js';
import { Node, RenderContext, ComponentContext, isElement } from '../index.js';
import z from 'zod';
import { zodToJsonSchema } from 'zod-to-json-schema';
import { AIJSXError, ErrorCode } from '../core/errors.js';
import { Converse, isConversationalComponent, renderToConversation } from '../core/conversation.js';

const toolChoiceSchema = z.object({
nameOfTool: z.string(),
@@ -206,10 +197,21 @@ export interface UseToolsProps {
* ```
*
*/
export async function* UseTools(props: UseToolsProps, { render }: RenderContext) {
export async function UseTools(props: UseToolsProps, { render, memo }: RenderContext) {
try {
const rendered = yield* render(<UseToolsFunctionCall {...props} />);
return rendered;
// TODO: ErrorBoundaries should be able to preserve the conversational structure, but they can't currently.
// So instead we start rendering the function call until it yields any conversational message. If we see one,
// we know that the <ChatCompletion> didn't immediately fail.
const memoizedFunctionCall = memo(<UseToolsFunctionCall {...props} />);
for await (const containsElement of render(memoizedFunctionCall, {
stop: isConversationalComponent,
map: (frame) => frame.find(isElement) !== undefined,
})) {
if (containsElement) {
break;
}
}
return memoizedFunctionCall;
} catch (e: any) {
if (e.code === ErrorCode.ChatModelDoesNotSupportFunctions) {
return <UseToolsPromptEngineered {...props} />;
@@ -219,83 +221,46 @@ export async function* UseTools(props: UseToolsProps, { render }: RenderContext)
}

/** @hidden */
export async function* UseToolsFunctionCall(
props: UseToolsProps,
{ render, memo, logger }: ComponentContext
): RenderableStream {
yield AppendOnlyStream;

const conversation = [memo(props.children)];

do {
const modelResponse = memo(<ChatCompletion functionDefinitions={props.tools}>{conversation}</ChatCompletion>);
if (props.showSteps) {
yield modelResponse;
}

const renderResult = await render(modelResponse, {
stop: (el) => el.tag === AssistantMessage || el.tag == FunctionCall,
});
let functionCallElement: Element<any> | null = null;

for (const element of renderResult) {
if (isElement(element)) {
conversation.push(memo(element));

if (element.tag === FunctionCall) {
// Model has generated a function call.
if (functionCallElement) {
throw new AIJSXError(
`ChatCompletion returned 2 function calls at the same time ${renderResult.join(', ')}`,
ErrorCode.ModelOutputCouldNotBeParsedForTool,
'runtime'
);
export async function UseToolsFunctionCall(props: UseToolsProps, { render }: ComponentContext) {
const converse = (
<Converse
reply={async (messages, fullConversation) => {
const lastMessage = messages[messages.length - 1];
switch (lastMessage.type) {
case 'functionCall': {
const { name, args } = lastMessage.element.props;
try {
return <FunctionResponse name={name}>{await props.tools[name].func(args)}</FunctionResponse>;
} catch (e: any) {
return (
<FunctionResponse failed name={name}>
{e.message}
</FunctionResponse>
);
}
}
functionCallElement = element;
}
} else {
logger.debug(
{ text: element },
'<ChatCompletion> emitted something other than <AssistantMessage> or <FunctionCall>, which is unexpected.'
);
}
}

if (functionCallElement) {
// Call the selected function and append the result to the messages.
let response;
try {
const callable = props.tools[functionCallElement.props.name].func;
response = await callable(functionCallElement.props.args);
} catch (e: any) {
response = `Function call to ${functionCallElement.props.name} failed with error: ${e.message}.`;
} finally {
const functionResponse = memo(
<FunctionResponse name={functionCallElement.props.name}>{response}</FunctionResponse>
);
if (props.showSteps) {
yield (
<>
{'\n'}
{functionResponse}
{'\n'}
</>
);
case 'functionResponse':
case 'user':
return (
<ChatCompletion functionDefinitions={props.tools}>
{fullConversation.map((m) => m.element)}
</ChatCompletion>
);
default:
return null;
}
conversation.push(functionResponse);
}

continue;
}
}}
>
{props.children}
</Converse>
);

// Terminate the loop when the model produces a response without a function call.
if (!props.showSteps) {
yield modelResponse;
}
break;
} while (true);
if (props.showSteps) {
return converse;
}

return AppendOnlyStream;
const messages = await renderToConversation(converse, render);
return messages.length && messages[messages.length - 1].element;
}

/** @hidden */
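For context, a rough sketch of invoking the rewritten `UseTools`. Only the `props.tools[name].func(args)` lookup is evidenced by the diff above; the tool map's `description`/`parameters` fields are assumptions about the `Tool` shape:

```tsx
/** @jsxImportSource ai-jsx */
import { UseTools } from 'ai-jsx/batteries/use-tools';
import { UserMessage } from 'ai-jsx/core/completion';

// Hypothetical tool map keyed by function name, as UseToolsFunctionCall expects.
const tools = {
  evaluate_math: {
    description: 'Evaluates a mathematical expression',
    parameters: {
      expression: { type: 'string', description: 'The expression to evaluate', required: true },
    },
    // Called with the parsed arguments from the model's FunctionCall (demo only).
    func: ({ expression }: { expression: string }) => String(eval(expression)),
  },
};

function App() {
  return (
    // showSteps surfaces intermediate FunctionCall/FunctionResponse messages.
    <UseTools tools={tools} showSteps>
      <UserMessage>What is 258 * 322?</UserMessage>
    </UseTools>
  );
}
```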
133 changes: 8 additions & 125 deletions packages/ai-jsx/src/core/completion.tsx
@@ -3,7 +3,6 @@
* @packageDocumentation
*/

import { ChatCompletionResponseMessage } from 'openai';
import * as AI from '../index.js';
import { Node, Component, RenderContext } from '../index.js';
import { AIJSXError, ErrorCode } from '../core/errors.js';
Expand All @@ -12,6 +11,14 @@ import { getEnvVar } from '../lib/util.js';
import { AnthropicChatModel } from '../lib/anthropic.js';
import z from 'zod';
import { zodToJsonSchema } from 'zod-to-json-schema';
export {
UserMessage,
SystemMessage,
AssistantMessage,
FunctionCall,
FunctionResponse,
ConversationHistory,
} from './conversation.js';

/**
* Represents properties passed to a given Large Language Model.
@@ -211,130 +218,6 @@ export function ChatProvider<T extends ModelPropsWithChildren>(
);
}

/**
* Provide a System Message to the LLM, for use within a {@link ChatCompletion}.
*
* The system message can be used to put the model in character. See https://platform.openai.com/docs/guides/gpt/chat-completions-api for more detail.
*
* @example
* ```tsx
* <ChatCompletion>
* <SystemMessage>You are a helpful customer service agent.</SystemMessage>
* </ChatCompletion>
* ```
*/
export function SystemMessage({ children }: { children: Node }) {
return children;
}

/**
* Provide a User Message to the LLM, for use within a {@link ChatCompletion}.
*
* The user message tells the model what the user has said. See https://platform.openai.com/docs/guides/gpt/chat-completions-api for more detail.
*
* @example
* ```tsx
* <ChatCompletion>
* <UserMessage>I'd like to cancel my account.</UserMessage>
* </ChatCompletion>
*
* ==> 'Sorry to hear that. Can you tell me why?'
* ```
*/
export function UserMessage({ children }: { name?: string; children: Node }) {
return children;
}

/**
* Provide an Assistant Message to the LLM, for use within a {@link ChatCompletion}.
*
* The assistant message tells the model what it has previously said. See https://platform.openai.com/docs/guides/gpt/chat-completions-api for more detail.
*
* @example
* ```tsx
* <ChatCompletion>
* <UserMessage>I'd like to cancel my account.</UserMessage>
* <AssistantMessage>Sorry to hear that. Can you tell me why?</AssistantMessage>
* <UserMessage>It's too expensive.</UserMessage>
* </ChatCompletion>
* ```
*
* ==> "Ok, thanks for that feedback. I'll cancel your account."
*/
export function AssistantMessage({ children }: { children: Node }) {
return children;
}

export function ConversationHistory({ messages }: { messages: ChatCompletionResponseMessage[] }) {
return messages.map((message) => {
switch (message.role) {
case 'system':
return <SystemMessage>{message.content}</SystemMessage>;
case 'user':
return <UserMessage>{message.content}</UserMessage>;
case 'assistant':
return <AssistantMessage>{message.content}</AssistantMessage>;
case 'function':
return (
<FunctionCall name={message.function_call!.name!} args={JSON.parse(message.function_call!.arguments!)} />
);
}
});
}

/**
* Provide a function call to the LLM, for use within a {@link ChatCompletion}.
*
* The function call tells the model that a function was previously invoked by the model. See https://platform.openai.com/docs/guides/gpt/chat-completions-api for more detail.
* When the model returns a function call, {@link ChatCompletion} returns a {@link FunctionCall} component.
*
* @example
* ```tsx
* <ChatCompletion>
* <UserMessage>What is 258 * 322?</UserMessage>
* <FunctionCall name="evaluate_math" args={expression: "258 * 322"} />
* <FunctionResponse name="evaluate_math">83076</FunctionResponse>
* </ChatCompletion>
*
* ==> "That would be 83,076."
* ```
*/
export function FunctionCall({
name,
partial,
args,
}: {
name: string;
partial?: boolean;
args: Record<string, string | number | boolean | null>;
}) {
return `Call function ${name} with ${partial ? '(incomplete) ' : ''}${JSON.stringify(args)}`;
}

/**
* Renders to the output of a previous {@link FunctionCall} component, for use within a {@link ChatCompletion}.
*
* See https://platform.openai.com/docs/guides/gpt/chat-completions-api for more detail.
*
* @example
* ```tsx
* <ChatCompletion>
* <UserMessage>What is 258 * 322?</UserMessage>
* <FunctionCall name="evaluate_math" args={expression: "258 * 322"} />
* <FunctionResponse name="evaluate_math">83076</FunctionResponse>
* </ChatCompletion>
*
* ==> "That would be 83,076."
* ```
*/
export async function FunctionResponse(
{ name, children }: { name: string; children: Node },
{ render }: AI.ComponentContext
) {
const output = await render(children);
return `function ${name} returns ${output}`;
}

/**
* Perform a Large Language Model call to do a [completion](https://platform.openai.com/docs/guides/gpt/completions-api).
*

3 comments on commit 203574a

@vercel vercel bot commented on 203574a Jul 31, 2023


Successfully deployed to the following URLs:

ai-jsx-docs – ./packages/docs

ai-jsx-docs-git-main-fixie-ai.vercel.app
ai-jsx-docs.vercel.app
ai-jsx-docs-fixie-ai.vercel.app
docs.ai-jsx.com

@vercel vercel bot commented on 203574a Jul 31, 2023


Successfully deployed to the following URLs:

ai-jsx-tutorial-nextjs – ./packages/tutorial-nextjs

ai-jsx-tutorial-nextjs-fixie-ai.vercel.app
ai-jsx-tutorial-nextjs.vercel.app
ai-jsx-tutorial-nextjs-git-main-fixie-ai.vercel.app

@vercel vercel bot commented on 203574a Jul 31, 2023


Successfully deployed to the following URLs:

ai-jsx-nextjs-demo – ./packages/nextjs-demo

ai-jsx-nextjs-demo-fixie-ai.vercel.app
ai-jsx-nextjs-demo.vercel.app
ai-jsx-nextjs-demo-git-main-fixie-ai.vercel.app
