diff --git a/docs/general_info/breaking_changes.md b/docs/general_info/breaking_changes.md index 71da4668987..ac3300c2764 100644 --- a/docs/general_info/breaking_changes.md +++ b/docs/general_info/breaking_changes.md @@ -11,13 +11,21 @@ weight: -10 Certain changes in the updates may impact cookies, leading to unexpected behaviors if not cleared properly. --- -## v0.7.1+ -!!! info "🔍 Google Search Plugin" +## v0.7.0+ - - **[Google Search Plugin](../features/plugins/google_search.md)**: Changed the environment variable for this plugin from `GOOGLE_API_KEY` to `GOOGLE_SEARCH_API_KEY` due to a conflict with the Google Generative AI library pulling this variable automatically. If you are using this plugin, please update your `.env` file accordingly. +!!! failure "Error Messages (UI)" + ![image](https://github.com/danny-avila/LibreChat/assets/32828263/0ab27798-5515-49b4-ac29-e4ad83d73d7c) -## v0.7.0+ + Client-facing error messages now display this warning asking to contact the admin. For the full error, consult the console logs or the additional logs located in `./logs` + +!!! warning "🪵 Logs Location" + + - The full logs are now in `./logs` (they are still in `./api/logs` for local, non-docker installations) + +!!! warning "🔍 Google Search Plugin" + + - **[Google Search Plugin](../features/plugins/google_search.md)**: Changed the environment variable for this plugin from `GOOGLE_API_KEY` to `GOOGLE_SEARCH_API_KEY` due to a conflict with the Google Generative AI library pulling this variable automatically. If you are using this plugin, please update your `.env` file accordingly. !!! 
info "🗃️ RAG API (Chat with Files)" diff --git a/docs/install/configuration/ai_endpoints.md b/docs/install/configuration/ai_endpoints.md index 3b86fdb8024..3169e834371 100644 --- a/docs/install/configuration/ai_endpoints.md +++ b/docs/install/configuration/ai_endpoints.md @@ -64,9 +64,11 @@ Some of the endpoints are marked as **Known,** which means they might have speci baseURL: "https://api.groq.com/openai/v1/" models: default: [ + "llama3-70b-8192", + "llama3-8b-8192", "llama2-70b-4096", "mixtral-8x7b-32768", - "gemma-7b-it" + "gemma-7b-it", ] fetch: false titleConvo: true @@ -374,3 +376,31 @@ Some of the endpoints are marked as **Known,** which means they might have speci forcePrompt: false modelDisplayLabel: "Ollama" ``` + +!!! tip "Ollama -> llama3" + + To prevent the behavior where llama3 does not stop generating, add this `addParams` block to the config: + + ```yaml + - name: "Ollama" + apiKey: "ollama" + baseURL: "http://host.docker.internal:11434/v1/" + models: + default: [ + "llama3" + ] + fetch: false # fetching list of models is not supported + titleConvo: true + titleModel: "llama3" + summarize: false + summaryModel: "llama3" + forcePrompt: false + modelDisplayLabel: "Ollama" + addParams: + "stop": [ + "<|start_header_id|>", + "<|end_header_id|>", + "<|eot_id|>", + "<|reserved_special_token" + ] + ``` \ No newline at end of file diff --git a/librechat.example.yaml b/librechat.example.yaml index 00f0f9fb507..6a119c01cf6 100644 --- a/librechat.example.yaml +++ b/librechat.example.yaml @@ -50,7 +50,13 @@ endpoints: apiKey: '${GROQ_API_KEY}' baseURL: 'https://api.groq.com/openai/v1/' models: - default: ['llama2-70b-4096', 'mixtral-8x7b-32768', 'gemma-7b-it'] + default: [ + "llama3-70b-8192", + "llama3-8b-8192", + "llama2-70b-4096", + "mixtral-8x7b-32768", + "gemma-7b-it", + ] fetch: false titleConvo: true titleModel: 'mixtral-8x7b-32768'