# config.example.yaml
model: openai:gpt-3.5-turbo # LLM model
temperature: 1.0 # GPT temperature, between 0 and 2
save: true # Whether to save the message
highlight: true # Set to false to turn off syntax highlighting
light_theme: false # Whether to use a light theme
wrap: no # Specify the text-wrapping mode (no, auto, <max-width>)
wrap_code: false # Whether to wrap code blocks
auto_copy: false # Automatically copy the last output to the clipboard
keybindings: emacs # REPL keybindings. (emacs, vi)
prelude: '' # Set a default role or session (role:<name>, session:<name>)
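# For example (hypothetical role name): a prelude like the commented line below would
# start every REPL session with a role named `coder`, assuming such a role is defined.
# prelude: 'role:coder'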
# Compress session if tokens exceed this value (valid when >=1000)
compress_threshold: 1000
# The prompt for summarizing session messages
summarize_prompt: 'Summarize the discussion briefly in 200 words or less to use as a prompt for future context.'
# The prompt for the summary of the session
summary_prompt: 'This is a summary of the chat history as a recap: '
# Custom REPL prompt, see https://github.com/sigoden/aichat/wiki/Custom-REPL-Prompt
left_prompt: '{color.green}{?session {session}{?role /}}{role}{color.cyan}{?session )}{!session >}{color.reset} '
right_prompt: '{color.purple}{?session {?consume_tokens {consume_tokens}({consume_percent}%)}{!consume_tokens {consume_tokens}}}{color.reset}'
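# A minimal sketch of a plainer prompt, using only template variables already shown above
# (the conditional {?...}/{!...} syntax and colors are optional):
# left_prompt: '{color.green}{role}{color.reset}> '
# right_prompt: ''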
clients:
  # All clients share the following configuration fields:
  # - type: xxxx
  #   name: nova                            # Only used to distinguish clients of the same type. Optional
  #   extra:
  #     proxy: socks5://127.0.0.1:1080      # Specify an https/socks5 proxy server. Note: HTTPS_PROXY/ALL_PROXY also work.
  #     connect_timeout: 10                 # Timeout in seconds for connecting to the server

  # See https://platform.openai.com/docs/quickstart
  - type: openai
    api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    api_base: https://api.openai.com/v1            # Optional field
    organization_id: org-xxxxxxxxxxxxxxxxxxxxxxxx  # Optional field

  # See https://ai.google.dev/docs
  - type: gemini
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # See https://docs.anthropic.com/claude/reference/getting-started-with-the-api
  - type: claude
    api_key: sk-xxx

  - type: mistral
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # Any OpenAI-compatible API provider, e.g. https://github.com/go-skynet/LocalAI
  - type: localai
    api_base: http://localhost:8080/v1
    api_key: xxx
    chat_endpoint: /chat/completions   # Optional field
    models:
      - name: llama2
        max_input_tokens: 8192
        extra_fields:                  # Optional field, set custom parameters
          key: value
      - name: llava
        max_input_tokens: 8192
        capabilities: text,vision      # Optional field, possible values: text, vision
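
  # A hedged sketch of reusing the `localai` client type for any other OpenAI-compatible
  # server; the name, host, port, and model below are hypothetical placeholders:
  # - type: localai
  #   name: my-local-server
  #   api_base: http://localhost:8000/v1
  #   api_key: xxx
  #   models:
  #     - name: my-local-model
  #       max_input_tokens: 8192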

  # See https://github.com/jmorganca/ollama
  - type: ollama
    api_base: http://localhost:11434
    api_key: Basic xxx                 # Set the authorization header
    chat_endpoint: /chat               # Optional field
    models:
      - name: mistral
        max_input_tokens: 8192
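
  # Note: each model listed above must already be available to the Ollama server,
  # e.g. pulled beforehand with `ollama pull mistral` (assumes the Ollama CLI is installed).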

  # See https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart
  - type: azure-openai
    api_base: https://{RESOURCE}.openai.azure.com
    api_key: xxx
    models:
      - name: MyGPT4                   # Model deployment name
        max_input_tokens: 8192
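
  # A filled-in sketch of the entry above; the resource and deployment names are hypothetical
  # and must match what is configured in your Azure OpenAI resource:
  # - type: azure-openai
  #   api_base: https://my-resource.openai.azure.com
  #   api_key: xxx
  #   models:
  #     - name: my-gpt4-deployment
  #       max_input_tokens: 8192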

  # See https://cloud.google.com/vertex-ai
  - type: vertexai
    api_base: https://{REGION}-aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{REGION}/publishers/google/models
    # Application Default Credentials (ADC) file. Optional field
    # Run `gcloud auth application-default login` to set up ADC;
    # see https://cloud.google.com/docs/authentication/external/set-up-adc
    adc_file: <path-to/gcloud/application_default_credentials.json>
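    # A filled-in sketch of api_base; the region and project ID below are hypothetical:
    # api_base: https://us-central1-aiplatform.googleapis.com/v1/projects/my-project/locations/us-central1/publishers/google/models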

  # See https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html
  - type: ernie
    api_key: xxxxxxxxxxxxxxxxxxxxxxxx
    secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # See https://help.aliyun.com/zh/dashscope/
  - type: qianwen
    api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx