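# Example configuration for the Telegram bot (typically copied to .env and filled in with real values).
# Core settings: bot token, admin/black/white lists, ignore mode, and the modules to enable.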
bot_token=12345...:...
adminlist=[1810772]
blacklist=[]
whitelist=[1810772, -123456789010]
ignore_mode=blacklist
active_modules=["sd", "llm", "tts", "stt", "admin"]
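# Text-to-speech (tts_*) settings: model paths, voices, backends, and per-user queue limits.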
tts_path=/Users/user/tts_provider/models
tts_voices='[
]'
tts_mode=local
tts_replacements={"key": "value", "key2": "value2"}
tts_credits="TTS models trained by "
tts_ffmpeg_path=/Users/user/Applications/ffmpeg
tts_queue_size_per_user=2
tts_enable_backends=["say_macos", "ttsx4", "coqui_tts", "so_vits_svc"]
tts_so_vits_svc_4_0_code_path='/Users/user/path/to/so-vits-svc'
tts_so_vits_svc_4_1_code_path='/Users/user/path/to/so-vits-svc4_1'
tts_so_vits_svc_voices='[
]'
tts_list_system_voices=False
tts_host=http://localhost:7077
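# Backend API hosts for the LLM and Stable Diffusion services.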
llm_host=http://localhost:5000
sd_host=http://localhost:7860
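# Stable Diffusion (sd_*) limits, generation defaults (tti/iti presumably text-to-image and
# image-to-image), and optional automatic launch of the web UI backend.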
sd_max_steps=40
sd_max_resolution=1280
sd_available_samplers=["Euler a", "Euler", "Heun", "DPM++ 2M", "DPM++ 2S a", "UniPC"]
sd_extra_prompt="a high quality image of {prompt}, 8k, masterpiece, detailed, accurate proportions"
sd_extra_negative_prompt="(worst quality:1.2), (lowres), deepfried, watermark, (blurry), jpeg noise, unsharp, deformed, {negative_prompt}"
sd_default_sampler="UniPC"
sd_default_n_iter=1
sd_default_width=512
sd_default_height=512
sd_default_tti_steps=22
sd_default_tti_cfg_scale=0
sd_default_iti_cfg_scale=8
sd_default_iti_steps=30
sd_default_iti_denoising_strength=0.58
sd_default_iti_sampler="Euler a"
sd_lora_custom_activations={"keyword": "trigger word <lora:lora_name:1>"}
sd_only_admins_can_change_models=False
sd_queue_size_per_user=5
sd_launch_process_automatically=False
sd_launch_command="python webui.py --api"
sd_launch_dir="/Users/user/stable-diffusion-webui/"
sd_launch_waittime=10
apply_mps_fixes=True
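# LLM (llm_*) settings: backend selection, character/assistant behavior, and generation limits.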
llm_queue_size_per_user=2
llm_backend=llama_cpp
llm_python_model_type=gpt2
llm_assistant_chronicler=instruct
llm_character=characters.llama_chat_default
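# Paths to model weights and code for the supported LLM backends (multi-line JSON map).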
llm_paths='{
"path_to_hf_llama":"/Users/user/LLaMA/hf-llama",
"path_to_llama_code":"/Users/user/LLaMA/llama-mps/",
"path_to_llama_weights":"/Users/user/LLaMA/7B/",
"path_to_llama_tokenizer":"/Users/user/LLaMA/tokenizer.model",
"path_to_llama_adapter":"/Users/LLaMA/LLaMA-Adapter/llama_adapter_len10_layer30_release.pth",
"path_to_llama_multimodal_adapter":"/Users/LLaMA/LLaMA-Adapter/ckpts/7f...13_BIAS-7B.pth",
"path_to_llama_lora":"/Users/user/LLaMA/alpaca-lora/models/aplaca-lora-7b",
"path_to_llama_cpp_weights":"/Users/user/LLaMA/llama.cpp_models/ggml-vicuna-7b-1.1-q4_2.gguf",
"path_to_llama_cpp_weights_dir":"/Users/user/LLaMA/llama.cpp_models/",
"path_to_gptj_weights":"/Users/user/gpt-j/GPT-J-6B_model",
"path_to_autohf_weights":"/Users/user/Cerebras-GPT-1.3B",
"path_to_gpt2_weights":"/Users/user/ru-gpt3-telegram-bot/rugpt3large_based_on_gpt2",
"path_to_minchatgpt_code":"/Users/user/minChatGPT/src",
"path_to_mlc_chatbot_code":"/Users/user/LLaMA/mlc-llm/mlc-chatbot/",
"path_to_mlc_pb_home_dir":"/Users/user/LLaMA/mlc-llm/",
"path_to_mlc_pb_binary_dir":""
}'
llm_history_grouping=chat
llm_max_history_items=10
llm_generation_cfg_override={}
llm_assistant_cfg_override={"early_stopping": true}
llm_assistant_use_in_chat_mode=False
llm_assistant_add_reply_context=True
llm_force_assistant_for_unsupported_models=False
llm_max_tokens=64
llm_max_assistant_tokens=128
llm_lcpp_max_context_size=66000
llm_lcpp_gpu_layers=1000
llm_remote_launch_process_automatically=True
llm_remote_launch_command="python3.10 server.py --api --n-gpu-layers 1000 --n_ctx 2048 --listen-port 5432"
llm_remote_launch_dir="/Users/user/text-generation-webui/"
llm_remote_model_name="orca-mini-v2_7b.ggmlv3.q4_0.bin"
llm_remote_launch_waittime=10
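# Speech-to-text (stt_*) settings: backend, model, and per-user queue limit.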
stt_backend=whisperS2T_CTranslate2
stt_model_path_or_name=tiny
stt_queue_size_per_user=1
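# Text-to-audio (tta_*) settings: music and sound-effect generation models.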
tta_queue_size_per_user=1
tta_device=cpu
tta_music_model=facebook/musicgen-small
tta_sfx_model=facebook/audiogen-medium
tta_duration=3
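# Model management (mm_*): caching limits and automatic unloading of loaded models.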
mm_preload_models_on_start=False
mm_ram_cached_model_count_limit=10
mm_vram_cached_model_count_limit=10
mm_management_policy=BOTH
mm_unload_order_policy=LEAST_USED
mm_autounload_after_seconds=240
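# System settings: Python command, web UI/API hosts, timeouts, log level, language,
# and per-extension configuration.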
python_command=python
threaded_initialization=True
sys_webui_host=http://localhost:7007
sys_api_host=http://localhost:7008
sys_request_timeout=120
sys_api_log_level=warning
lang=en
extensions_config='{
}'