diff --git a/CHANGELOG.md b/CHANGELOG.md
index 476338fe7..029a365d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 * `shiny create` includes new and improved `ui.Chat()` template options. Most of these templates leverage the new [`{chatlas}` package](https://posit-dev.github.io/chatlas/), our opinionated approach to interfacing with various LLM providers. (#1806)
 
+* `ui.Chat`'s `.append_message()` method now automatically streams generators and async generators. (#1800)
+
 ### Bug fixes
 
 * `ui.Chat()` now correctly handles the new `ollama.chat()` return value introduced in `ollama` v0.4. (#1787)
diff --git a/shiny/ui/_chat.py b/shiny/ui/_chat.py
index 61476cd48..bad18db7b 100644
--- a/shiny/ui/_chat.py
+++ b/shiny/ui/_chat.py
@@ -541,13 +541,21 @@ async def append_message(self, message: Any) -> None:
             The message to append. A variety of message formats are supported
             including a string, a dictionary with `content` and `role` keys, or a
             relevant chat completion object from platforms like OpenAI, Anthropic,
             Ollama, and others.
+            When the message is a generator or async generator, it is automatically
+            treated as a stream of message chunks (i.e., uses
+            `.append_message_stream()`).
 
         Note
         ----
-        Use `.append_message_stream()` instead of this method when `stream=True` (or
-        similar) is specified in model's completion method.
+        Although this method tries its best to handle various message formats, it's
+        not always possible to handle every message format. If you encounter an error
+        or no response when appending a message, try extracting the message content
+        as a string and passing it to this method.
         """
-        await self._append_message(message)
+        if inspect.isasyncgen(message) or inspect.isgenerator(message):
+            await self.append_message_stream(message)
+        else:
+            await self._append_message(message)
 
     async def _append_message(
         self, message: Any, *, chunk: ChunkOption = False, stream_id: str | None = None
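
A minimal usage sketch (not part of the diff) of the behavior this change enables, assuming a Shiny Express app; the `chunks()` generator and the `"chat"` id are illustrative names:

```python
# Hedged sketch: with this change, passing a (sync or async) generator to
# .append_message() is detected via inspect.isgenerator / inspect.isasyncgen
# and forwarded to .append_message_stream().
from shiny.express import ui

chat = ui.Chat(id="chat")  # the "chat" id is illustrative
chat.ui()


@chat.on_user_submit
async def _():
    # Hypothetical generator yielding message chunks.
    def chunks():
        yield "Hello, "
        yield "world!"

    # Before this change, streaming required calling .append_message_stream()
    # directly; now .append_message() recognizes the generator and streams it.
    await chat.append_message(chunks())
```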