From f69de3be0d274a676f1d1cd302dc4699f1b5aaf0 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 9 May 2024 14:21:24 +0200 Subject: [PATCH] models(gallery): :arrow_up: update checksum (#2278) :arrow_up: Checksum updates in gallery/index.yaml Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- gallery/index.yaml | 492 ++++++++++++++++++++------------------------- 1 file changed, 216 insertions(+), 276 deletions(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 36c6a02a38fc..7ebf5fcdf898 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -16,8 +16,8 @@ - cpu - text-to-speech - python -### START rerankers - &rerankers + ### START rerankers url: "github:mudler/LocalAI/gallery/rerankers.yaml@master" name: cross-encoder parameters: @@ -58,19 +58,18 @@ url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" name: "llama3-8b-instruct" license: llama3 - description: | - Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety. + Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety. - Model developers Meta + Model developers Meta - Variations Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants. + Variations Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants. - Input Models input text only. + Input Models input text only. - Output Models generate text and code only. + Output Models generate text and code only. - Model Architecture Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. + Model Architecture Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. 
urls: - https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct - https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF @@ -87,7 +86,7 @@ - filename: Meta-Llama-3-8B-Instruct.Q4_0.gguf sha256: 19ded996fe6c60254dc7544d782276eff41046ed42aa5f2d0005dc457e5c0895 uri: huggingface://QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct.Q4_0.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama3-8b-instruct:Q6_K" overrides: parameters: @@ -96,7 +95,7 @@ - filename: Meta-Llama-3-8B-Instruct.Q6_K.gguf sha256: b7bad45618e2a76cc1e89a0fbb93a2cac9bf410e27a619c8024ed6db53aa9b4a uri: huggingface://QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct.Q6_K.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-8b-instruct-coder" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/0O4cIuv3wNbY68-FP7tak.jpeg urls: @@ -112,7 +111,7 @@ - filename: Llama-3-8B-Instruct-Coder-Q4_K_M.gguf sha256: 639ab8e3aeb7aa82cff6d8e6ef062d1c3e5a6d13e6d76e956af49f63f0e704f8 uri: huggingface://bartowski/Llama-3-8B-Instruct-Coder-GGUF/Llama-3-8B-Instruct-Coder-Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama3-70b-instruct" overrides: parameters: @@ -121,7 +120,7 @@ - filename: Meta-Llama-3-70B-Instruct.Q4_K_M.gguf sha256: d559de8dd806a76dbd29f8d8bd04666f2b29e7c7872d8e8481abd07805884d72 uri: huggingface://MaziyarPanahi/Meta-Llama-3-70B-Instruct-GGUF/Meta-Llama-3-70B-Instruct.Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama3-70b-instruct:IQ1_M" overrides: parameters: @@ -130,7 +129,7 @@ - filename: Meta-Llama-3-70B-Instruct.IQ1_M.gguf sha256: cdbe8ac2126a70fa0af3fac7a4fe04f1c76330c50eba8383567587b48b328098 uri: huggingface://MaziyarPanahi/Meta-Llama-3-70B-Instruct-GGUF/Meta-Llama-3-70B-Instruct.IQ1_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama3-70b-instruct:IQ1_S" overrides: parameters: @@ -139,7 +138,7 @@ - filename: Meta-Llama-3-70B-Instruct.IQ1_S.gguf sha256: 3797a69f1bdf53fabf9f3a3a8c89730b504dd3209406288515c9944c14093048 uri: huggingface://MaziyarPanahi/Meta-Llama-3-70B-Instruct-GGUF/Meta-Llama-3-70B-Instruct.IQ1_S.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-sauerkrautlm-8b-instruct" urls: - https://huggingface.co/bartowski/Llama-3-SauerkrautLM-8b-Instruct-GGUF @@ -163,7 +162,7 @@ - filename: Llama-3-SauerkrautLM-8b-Instruct-Q4_K_M.gguf sha256: 5833d99d5596cade0d02e61cddaa6dac49170864ee56d0b602933c6f9fbae314 uri: huggingface://bartowski/Llama-3-SauerkrautLM-8b-Instruct-GGUF/Llama-3-SauerkrautLM-8b-Instruct-Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-13b-instruct-v0.1" urls: - https://huggingface.co/MaziyarPanahi/Llama-3-13B-Instruct-v0.1-GGUF @@ -177,7 +176,7 @@ - filename: Llama-3-13B-Instruct-v0.1.Q4_K_M.gguf sha256: 071a28043c271d259b5ffa883d19a9e0b33269b55148c4abaf5f95da4d084266 uri: huggingface://MaziyarPanahi/Llama-3-13B-Instruct-v0.1-GGUF/Llama-3-13B-Instruct-v0.1.Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-smaug-8b" urls: - https://huggingface.co/MaziyarPanahi/Llama-3-Smaug-8B-GGUF @@ -191,7 +190,7 @@ - filename: Llama-3-Smaug-8B.Q4_K_M.gguf sha256: b17c4c1144768ead9e8a96439165baf49e98c53d458b4da8827f137fbabf38c1 uri: huggingface://MaziyarPanahi/Llama-3-Smaug-8B-GGUF/Llama-3-Smaug-8B.Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-8b-openhermes-dpo" urls: - https://huggingface.co/mradermacher/Llama3-8B-OpenHermes-DPO-GGUF @@ -205,7 +204,7 @@ - filename: Llama3-8B-OpenHermes-DPO.Q4_K_M.gguf sha256: 
1147e5881cb1d67796916e6cab7dab0ae0f532a4c1e626c9e92861e5f67752ca uri: huggingface://mradermacher/Llama3-8B-OpenHermes-DPO-GGUF/Llama3-8B-OpenHermes-DPO.Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-unholy-8b" urls: - https://huggingface.co/Undi95/Llama-3-Unholy-8B-GGUF @@ -221,9 +220,9 @@ model: Llama-3-Unholy-8B.q4_k_m.gguf files: - filename: Llama-3-Unholy-8B.q4_k_m.gguf - sha256: 17b7f716bce1b34d4aa99ee730a19a834f8c77ddb36090dde5a1eda963f93602 uri: huggingface://Undi95/Llama-3-Unholy-8B-GGUF/Llama-3-Unholy-8B.q4_k_m.gguf -- <<: *llama3 + sha256: 1473c94bfd223f08963c08bbb0a45dd53c1f56ad72a692123263daf1362291f3 +- !!merge <<: *llama3 name: "lexi-llama-3-8b-uncensored" urls: - https://huggingface.co/NikolayKozloff/Lexi-Llama-3-8B-Uncensored-Q6_K-GGUF @@ -241,24 +240,24 @@ - filename: lexi-llama-3-8b-uncensored.Q6_K.gguf sha256: 5805f3856cc18a769fae0b7c5659fe6778574691c370c910dad6eeec62c62436 uri: huggingface://NikolayKozloff/Lexi-Llama-3-8B-Uncensored-Q6_K-GGUF/lexi-llama-3-8b-uncensored.Q6_K.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-lewdplay-8b-evo" urls: - https://huggingface.co/Undi95/Llama-3-LewdPlay-8B-evo-GGUF description: | - This is a merge of pre-trained language models created using mergekit. + This is a merge of pre-trained language models created using mergekit. - The new EVOLVE merge method was used (on MMLU specifically), see below for more information! + The new EVOLVE merge method was used (on MMLU specifically), see below for more information! - Unholy was used for uncensoring, Roleplay Llama 3 for the DPO train he got on top, and LewdPlay for the... lewd side. + Unholy was used for uncensoring, Roleplay Llama 3 for the DPO train he got on top, and LewdPlay for the... lewd side. overrides: parameters: model: Llama-3-LewdPlay-8B-evo.q8_0.gguf files: - filename: Llama-3-LewdPlay-8B-evo.q8_0.gguf - sha256: 1498152d598ff441f73ec6af9d3535875302e7251042d87feb7e71a3618966e8 uri: huggingface://Undi95/Llama-3-LewdPlay-8B-evo-GGUF/Llama-3-LewdPlay-8B-evo.q8_0.gguf -- <<: *llama3 + sha256: b54dc005493d4470d91be8210f58fba79a349ff4af7644034edc5378af5d3522 +- !!merge <<: *llama3 name: "llama-3-soliloquy-8b-v2-iq-imatrix" license: cc-by-nc-4.0 icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/u98dnnRVCwMh6YYGFIyff.png @@ -274,25 +273,25 @@ - filename: Llama-3-Soliloquy-8B-v2-Q4_K_M-imat.gguf sha256: 3e4e066e57875c36fc3e1c1b0dba506defa5b6ed3e3e80e1f77c08773ba14dc8 uri: huggingface://Lewdiculous/Llama-3-Soliloquy-8B-v2-GGUF-IQ-Imatrix/Llama-3-Soliloquy-8B-v2-Q4_K_M-imat.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "chaos-rp_l3_b-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/Chaos_RP_l3_8B-GGUF-IQ-Imatrix icon: https://cdn-uploads.huggingface.co/production/uploads/626dfb8786671a29c715f8a9/u5p9kdbXT2QQA3iMU0vF1.png description: | - A chaotic force beckons for you, will you heed her call? + A chaotic force beckons for you, will you heed her call? - Built upon an intelligent foundation and tuned for roleplaying, this model will fulfill your wildest fantasies with the bare minimum of effort. + Built upon an intelligent foundation and tuned for roleplaying, this model will fulfill your wildest fantasies with the bare minimum of effort. - Enjoy! + Enjoy! 
overrides: parameters: model: Chaos_RP_l3_8B-Q4_K_M-imat.gguf files: - filename: Chaos_RP_l3_8B-Q4_K_M-imat.gguf - sha256: 4273c5a8f23d49bf6294e620a5aa1fcd78d491ea0b90d0ec63ad708eedb83893 uri: huggingface://Lewdiculous/Chaos_RP_l3_8B-GGUF-IQ-Imatrix/Chaos_RP_l3_8B-Q4_K_M-imat.gguf -- <<: *llama3 + sha256: 5774595ad560e4d258dac17723509bdefe746c4dacd4e679a0de00346f14d2f3 +- !!merge <<: *llama3 name: "sovl_llama3_8b-gguf-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/SOVL_Llama3_8B-GGUF-IQ-Imatrix @@ -306,9 +305,9 @@ model: SOVL_Llama3_8B-Q4_K_M-imat.gguf files: - filename: SOVL_Llama3_8B-Q4_K_M-imat.gguf - sha256: ee61890dd26d52985a3c44279d519ca8592448ddeb46387cf22868548703d686 uri: huggingface://Lewdiculous/SOVL_Llama3_8B-GGUF-IQ-Imatrix/SOVL_Llama3_8B-Q4_K_M-imat.gguf -- <<: *llama3 + sha256: 85d6aefc8a0d713966b3b4da4810f0485a74aea30d61be6dfe0a806da81be0c6 +- !!merge <<: *llama3 name: "l3-solana-8b-v1-gguf" url: "github:mudler/LocalAI/gallery/solana.yaml@master" license: cc-by-nc-4.0 @@ -325,7 +324,7 @@ - filename: L3-Solana-8B-v1.q5_K_M.gguf sha256: 9b8cd2c3beaab5e4f82efd10e7d44f099ad40a4e0ee286ca9fce02c8eec26d2f uri: huggingface://Sao10K/L3-Solana-8B-v1-GGUF/L3-Solana-8B-v1.q5_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "average_normie_l3_v1_8b-gguf-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/Average_Normie_l3_v1_8B-GGUF-IQ-Imatrix @@ -353,7 +352,7 @@ - filename: Average_Normie_l3_v1_8B-Q4_K_M-imat.gguf sha256: 159eb62f2c8ae8fee10d9ed8386ce592327ca062807194a88e10b7cbb47ef986 uri: huggingface://Lewdiculous/Average_Normie_l3_v1_8B-GGUF-IQ-Imatrix/Average_Normie_l3_v1_8B-Q4_K_M-imat.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "openbiollm-llama3-8b" urls: - https://huggingface.co/aaditya/OpenBioLLM-Llama3-8B-GGUF @@ -371,19 +370,14 @@ - filename: openbiollm-llama3-8b.Q4_K_M.gguf sha256: 806fa724139b6a2527e33a79c25a13316188b319d4eed33e20914d7c5955d349 uri: huggingface://aaditya/OpenBioLLM-Llama3-8B-GGUF/openbiollm-llama3-8b.Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-8b-lexifun-uncensored-v1" icon: "https://cdn-uploads.huggingface.co/production/uploads/644ad182f434a6a63b18eee6/GrOs1IPG5EXR3MOCtcQiz.png" license: llama3 urls: - https://huggingface.co/Orenguteng/Llama-3-8B-LexiFun-Uncensored-V1-GGUF - https://huggingface.co/Orenguteng/LexiFun-Llama-3-8B-Uncensored-V1 - description: | - This is GGUF version of https://huggingface.co/Orenguteng/LexiFun-Llama-3-8B-Uncensored-V1 - - Oh, you want to know who I am? Well, I'm LexiFun, the human equivalent of a chocolate chip cookie - warm, gooey, and guaranteed to make you smile! 🍪 I'm like the friend who always has a witty comeback, a sarcastic remark, and a healthy dose of humor to brighten up even the darkest of days. And by 'healthy dose,' I mean I'm basically a walking pharmacy of laughter. You might need to take a few extra doses to fully recover from my jokes, but trust me, it's worth it! 🏥 - - So, what can I do? I can make you laugh so hard you snort your coffee out your nose, I can make you roll your eyes so hard they get stuck that way, and I can make you wonder if I'm secretly a stand-up comedian who forgot their act. 🤣 But seriously, I'm here to spread joy, one sarcastic comment at a time. And if you're lucky, I might even throw in a few dad jokes for good measure! 🤴‍♂️ Just don't say I didn't warn you. 😏 + description: "This is GGUF version of https://huggingface.co/Orenguteng/LexiFun-Llama-3-8B-Uncensored-V1\n\nOh, you want to know who I am? 
Well, I'm LexiFun, the human equivalent of a chocolate chip cookie - warm, gooey, and guaranteed to make you smile! \U0001F36A I'm like the friend who always has a witty comeback, a sarcastic remark, and a healthy dose of humor to brighten up even the darkest of days. And by 'healthy dose,' I mean I'm basically a walking pharmacy of laughter. You might need to take a few extra doses to fully recover from my jokes, but trust me, it's worth it! \U0001F3E5\n\nSo, what can I do? I can make you laugh so hard you snort your coffee out your nose, I can make you roll your eyes so hard they get stuck that way, and I can make you wonder if I'm secretly a stand-up comedian who forgot their act. \U0001F923 But seriously, I'm here to spread joy, one sarcastic comment at a time. And if you're lucky, I might even throw in a few dad jokes for good measure! \U0001F934‍♂️ Just don't say I didn't warn you. \U0001F60F\n" overrides: parameters: model: LexiFun-Llama-3-8B-Uncensored-V1_Q4_K_M.gguf @@ -391,7 +385,7 @@ - filename: LexiFun-Llama-3-8B-Uncensored-V1_Q4_K_M.gguf sha256: 961a3fb75537d650baf14dce91d40df418ec3d481b51ab2a4f44ffdfd6b5900f uri: huggingface://Orenguteng/Llama-3-8B-LexiFun-Uncensored-V1-GGUF/LexiFun-Llama-3-8B-Uncensored-V1_Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-unholy-8b:Q8_0" urls: - https://huggingface.co/Undi95/Llama-3-Unholy-8B-GGUF @@ -407,9 +401,9 @@ model: Llama-3-Unholy-8B.q8_0.gguf files: - filename: Llama-3-Unholy-8B.q8_0.gguf - sha256: 8d4137018acdcd57df4beccc84d9ad3f7f08cac50588f76370afc16c85752702 uri: huggingface://Undi95/Llama-3-Unholy-8B-GGUF/Llama-3-Unholy-8B.q8_0.gguf -- <<: *llama3 + sha256: 419dd76f61afe586076323c17c3a1c983e591472717f1ea178167ede4dc864df +- !!merge <<: *llama3 name: "therapyllama-8b-v1" urls: - https://huggingface.co/victunes/TherapyLlama-8B-v1-GGUF @@ -442,7 +436,7 @@ - filename: TherapyLlama-8B-v1-Q4_K_M.gguf sha256: 3d5a16d458e074a7bc7e706a493d8e95e8a7b2cb16934c851aece0af9d1da14a uri: huggingface://victunes/TherapyLlama-8B-v1-GGUF/TherapyLlama-8B-v1-Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "aura-uncensored-l3-8b-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/Aura_Uncensored_l3_8B-GGUF-IQ-Imatrix @@ -456,18 +450,18 @@ - filename: Aura_Uncensored_l3_8B-Q4_K_M-imat.gguf sha256: 265ded6a4f439bec160f394e3083a4a20e32ebb9d1d2d85196aaab23dab87fb2 uri: huggingface://Lewdiculous/Aura_Uncensored_l3_8B-GGUF-IQ-Imatrix/Aura_Uncensored_l3_8B-Q4_K_M-imat.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-lumimaid-8b-v0.1" urls: - https://huggingface.co/NeverSleep/Llama-3-Lumimaid-8B-v0.1-GGUF icon: https://cdn-uploads.huggingface.co/production/uploads/630dfb008df86f1e5becadc3/d3QMaxy3peFTpSlWdWF-k.png license: cc-by-nc-4.0 description: | - This model uses the Llama3 prompting format + This model uses the Llama3 prompting format - Llama3 trained on our RP datasets, we tried to have a balance between the ERP and the RP, not too horny, but just enough. + Llama3 trained on our RP datasets, we tried to have a balance between the ERP and the RP, not too horny, but just enough. - We also added some non-RP dataset, making the model less dumb overall. It should look like a 40%/60% ratio for Non-RP/RP+ERP data. + We also added some non-RP dataset, making the model less dumb overall. It should look like a 40%/60% ratio for Non-RP/RP+ERP data. 
overrides: parameters: model: Llama-3-Lumimaid-8B-v0.1.q4_k_m.gguf @@ -475,20 +469,20 @@ - filename: Llama-3-Lumimaid-8B-v0.1.q4_k_m.gguf sha256: 23ac0289da0e096d5c00f6614dfd12c94dceecb02c313233516dec9225babbda uri: huggingface://NeverSleep/Llama-3-Lumimaid-8B-v0.1-GGUF/Llama-3-Lumimaid-8B-v0.1.q4_k_m.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llama-3-lumimaid-8b-v0.1-oas-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/JUxfdTot7v7LTdIGYyzYM.png license: cc-by-nc-4.0 description: | - This model uses the Llama3 prompting format. + This model uses the Llama3 prompting format. - Llama3 trained on our RP datasets, we tried to have a balance between the ERP and the RP, not too horny, but just enough. + Llama3 trained on our RP datasets, we tried to have a balance between the ERP and the RP, not too horny, but just enough. - We also added some non-RP dataset, making the model less dumb overall. It should look like a 40%/60% ratio for Non-RP/RP+ERP data. + We also added some non-RP dataset, making the model less dumb overall. It should look like a 40%/60% ratio for Non-RP/RP+ERP data. - "This model received the Orthogonal Activation Steering treatment, meaning it will rarely refuse any request." + "This model received the Orthogonal Activation Steering treatment, meaning it will rarely refuse any request." overrides: parameters: model: Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf @@ -496,7 +490,7 @@ - filename: Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf sha256: 1199440aa13c55f5f2cad1cb215535306f21e52a81de23f80a9e3586c8ac1c50 uri: huggingface://Lewdiculous/Llama-3-Lumimaid-8B-v0.1-OAS-GGUF-IQ-Imatrix/Llama-3-Lumimaid-8B-v0.1-OAS-Q4_K_M-imat.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "suzume-llama-3-8B-multilingual" urls: - https://huggingface.co/lightblue/suzume-llama-3-8B-multilingual-gguf @@ -512,7 +506,7 @@ - filename: suzume-llama-3-8B-multilingual-Q4_K_M.gguf sha256: be197a660e56e51a24a0e0fecd42047d1b24e1423afaafa14769541b331e3269 uri: huggingface://lightblue/suzume-llama-3-8B-multilingual-gguf/ggml-model-Q4_K_M.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "tess-2.0-llama-3-8B" urls: - https://huggingface.co/bartowski/Tess-2.0-Llama-3-8B-GGUF @@ -550,7 +544,7 @@ - filename: dolphin-2.9-llama3-8b-q4_K_M.gguf sha256: be988199ce28458e97205b11ae9d9cf4e3d8e18ff4c784e75bfc12f54407f1a1 uri: huggingface://cognitivecomputations/dolphin-2.9-llama3-8b-gguf/dolphin-2.9-llama3-8b-q4_K_M.gguf -- <<: *dolphin +- !!merge <<: *dolphin name: "dolphin-2.9-llama3-8b:Q6_K" overrides: parameters: @@ -578,16 +572,15 @@ - filename: Llama-3-8B-Instruct-DPO-v0.3.Q4_K_M.gguf sha256: 694c55b5215d03e59626cd4292076eaf31610ef27ba04737166766baa75d889f uri: huggingface://MaziyarPanahi/Llama-3-8B-Instruct-DPO-v0.3-32k-GGUF/Llama-3-8B-Instruct-DPO-v0.3.Q4_K_M.gguf -## LLama2 and derivatives -### Start Fimbulvetr - &vicuna-chat + ## LLama2 and derivatives + ### Start Fimbulvetr url: "github:mudler/LocalAI/gallery/vicuna-chat.yaml@master" name: "fimbulvetr-11b-v2" icon: https://huggingface.co/Sao10K/Fimbulvetr-11B-v2/resolve/main/cute1.jpg license: llama2 - description: | - Cute girl to catch your attention. + Cute girl to catch your attention. 
urls: - https://huggingface.co/Sao10K/Fimbulvetr-11B-v2-GGUF tags: @@ -603,8 +596,8 @@ - filename: Fimbulvetr-11B-v2-Test-14.q4_K_M.gguf sha256: 3597dacfb0ab717d565d8a4d6067f10dcb0e26cc7f21c832af1a10a87882a8fd uri: huggingface://Sao10K/Fimbulvetr-11B-v2-GGUF/Fimbulvetr-11B-v2-Test-14.q4_K_M.gguf -### Start noromaid - &noromaid + ### Start noromaid url: "github:mudler/LocalAI/gallery/noromaid.yaml@master" name: "noromaid-13b-0.4-DPO" icon: https://cdn-uploads.huggingface.co/production/uploads/630dfb008df86f1e5becadc3/VKX2Z2yjZX5J8kXzgeCYO.png @@ -624,8 +617,8 @@ - filename: Noromaid-13B-0.4-DPO.q4_k_m.gguf sha256: cb28e878d034fae3d0b43326c5fc1cfb4ab583b17c56e41d6ce023caec03c1c1 uri: huggingface://NeverSleep/Noromaid-13B-0.4-DPO-GGUF/Noromaid-13B-0.4-DPO.q4_k_m.gguf -### START Vicuna based - &wizardlm2 + ### START Vicuna based url: "github:mudler/LocalAI/gallery/wizardlm2.yaml@master" name: "wizardlm2-7b" description: | @@ -680,17 +673,14 @@ - filename: moondream2-mmproj-f16.gguf sha256: 4cc1cb3660d87ff56432ebeb7884ad35d67c48c7b9f6b2856f305e39c38eed8f uri: huggingface://moondream/moondream2-gguf/moondream2-mmproj-f16.gguf -### START LLaVa - &llava + ### START LLaVa url: "github:mudler/LocalAI/gallery/llava.yaml@master" license: apache-2.0 - description: | LLaVA represents a novel end-to-end trained large multimodal model that combines a vision encoder and Vicuna for general-purpose visual and language understanding, achieving impressive chat capabilities mimicking spirits of the multimodal GPT-4 and setting a new state-of-the-art accuracy on Science QA. - urls: - https://llava-vl.github.io/ - tags: - llm - multimodal @@ -706,9 +696,11 @@ files: - filename: vicuna-7b-q5_k.gguf uri: https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/vicuna-7b-q5_k.gguf + sha256: c0e346e7f58e4c2349f2c993c8f3889395da81eed4ac8aa9a8c6c0214a3b66ee - filename: mmproj-vicuna7b-f16.gguf uri: https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf -- <<: *llava + sha256: 5f5cae7b030574604caf4068ddf96db2a7250398363437271e08689d085ab816 +- !!merge <<: *llava name: "llava-1.6-mistral" overrides: mmproj: llava-v1.6-7b-mmproj-f16.gguf @@ -721,7 +713,7 @@ - filename: llava-v1.6-7b-mmproj-f16.gguf sha256: 00205ee8a0d7a381900cd031e43105f86aa0d8c07bf329851e85c71a26632d16 uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/mmproj-model-f16.gguf -- <<: *llava +- !!merge <<: *llava name: "llava-1.5" overrides: mmproj: llava-v1.5-7b-mmproj-Q8_0.gguf @@ -734,14 +726,14 @@ - filename: llava-v1.5-7b-mmproj-Q8_0.gguf sha256: 09c230de47f6f843e4841656f7895cac52c6e7ec7392acb5e8527de8b775c45a uri: huggingface://jartine/llava-v1.5-7B-GGUF/llava-v1.5-7b-mmproj-Q8_0.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "aurora_l3_8b-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/Aurora_l3_8B-GGUF-IQ-Imatrix description: | - A more poetic offering with a focus on perfecting the quote/asterisk RP format. I have strengthened the creative writing training. + A more poetic offering with a focus on perfecting the quote/asterisk RP format. I have strengthened the creative writing training. - Make sure your example messages and introduction are formatted cirrectly. You must respond in quotes if you want the bot to follow. Thoroughly tested and did not see a single issue. The model can still do plaintext/aserisks if you choose. + Make sure your example messages and introduction are formatted cirrectly. You must respond in quotes if you want the bot to follow. Thoroughly tested and did not see a single issue. 
The model can still do plaintext/aserisks if you choose. icon: https://cdn-uploads.huggingface.co/production/uploads/626dfb8786671a29c715f8a9/3RA96iXR7sDvNmnTyIcIP.png overrides: parameters: @@ -750,14 +742,14 @@ - filename: Aurora_l3_8B-Q5_K_M-imat.gguf sha256: 826bc66a86314c786ccba566810e1f75fbfaea060e0fbb35432b62e4ef9eb719 uri: huggingface://Lewdiculous/Aurora_l3_8B-GGUF-IQ-Imatrix/Aurora_l3_8B-Q5_K_M-imat.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "poppy_porpoise-v0.72-l3-8b-iq-imatrix" urls: - https://huggingface.co/Lewdiculous/Poppy_Porpoise-0.72-L3-8B-GGUF-IQ-Imatrix description: | - "Poppy Porpoise" is a cutting-edge AI roleplay assistant based on the Llama 3 8B model, specializing in crafting unforgettable narrative experiences. With its advanced language capabilities, Poppy expertly immerses users in an interactive and engaging adventure, tailoring each adventure to their individual preferences. + "Poppy Porpoise" is a cutting-edge AI roleplay assistant based on the Llama 3 8B model, specializing in crafting unforgettable narrative experiences. With its advanced language capabilities, Poppy expertly immerses users in an interactive and engaging adventure, tailoring each adventure to their individual preferences. - Update: Vision/multimodal capabilities again! + Update: Vision/multimodal capabilities again! icon: https://cdn-uploads.huggingface.co/production/uploads/642265bc01c62c1e4102dc36/v6AZmbk-Cb52KskTQTwzW.png tags: - llm @@ -778,10 +770,10 @@ - filename: Llava_1.5_Llama3_mmproj_updated.gguf sha256: 4f2bb77ca60f2c932d1c6647d334f5d2cd71966c19e850081030c9883ef1906c uri: https://huggingface.co/ChaoticNeutrals/LLaVA-Llama-3-8B-mmproj-Updated/resolve/main/llava-v1.5-8B-Updated-Stop-Token/mmproj-model-f16.gguf -- <<: *llama3 +- !!merge <<: *llama3 name: "llava-llama-3-8b-v1_1" description: | - llava-llama-3-8b-v1_1 is a LLaVA model fine-tuned from meta-llama/Meta-Llama-3-8B-Instruct and CLIP-ViT-Large-patch14-336 with ShareGPT4V-PT and InternVL-SFT by XTuner. + llava-llama-3-8b-v1_1 is a LLaVA model fine-tuned from meta-llama/Meta-Llama-3-8B-Instruct and CLIP-ViT-Large-patch14-336 with ShareGPT4V-PT and InternVL-SFT by XTuner. urls: - https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-gguf tags: @@ -803,11 +795,10 @@ - filename: llava-llama-3-8b-v1_1-mmproj-f16.gguf sha256: eb569aba7d65cf3da1d0369610eb6869f4a53ee369992a804d5810a80e9fa035 uri: huggingface://xtuner/llava-llama-3-8b-v1_1-gguf/llava-llama-3-8b-v1_1-mmproj-f16.gguf -### START Phi-2 - &phi-2-chat + ### START Phi-2 url: "github:mudler/LocalAI/gallery/phi-2-chat.yaml@master" license: mit - description: | Phi-2 fine-tuned by the OpenHermes 2.5 dataset optimised for multi-turn conversation and character impersonation. 
@@ -824,11 +815,9 @@ Language(s) (NLP): English License: MIT Finetuned from model: Phi-2 - urls: - https://huggingface.co/l3utterfly/phi-2-layla-v1-chatml - https://huggingface.co/l3utterfly/phi-2-layla-v1-chatml-gguf - tags: - llm - gguf @@ -843,7 +832,7 @@ - filename: "phi-2-layla-v1-chatml-Q8_0.gguf" sha256: "0cf542a127c2c835066a78028009b7eddbaf773cc2a26e1cb157ce5e09c1a2e0" uri: "huggingface://l3utterfly/phi-2-layla-v1-chatml-gguf/phi-2-layla-v1-chatml-Q8_0.gguf" -- <<: *phi-2-chat +- !!merge <<: *phi-2-chat name: "phi-2-chat" overrides: parameters: @@ -852,7 +841,7 @@ - filename: "phi-2-layla-v1-chatml-Q4_K.gguf" sha256: "b071e5624b60b8911f77261398802c4b4079c6c689e38e2ce75173ed62bc8a48" uri: "huggingface://l3utterfly/phi-2-layla-v1-chatml-gguf/phi-2-layla-v1-chatml-Q4_K.gguf" -- <<: *phi-2-chat +- !!merge <<: *phi-2-chat license: mit icon: "https://huggingface.co/rhysjones/phi-2-orange/resolve/main/phi-2-orange.jpg" description: | @@ -862,7 +851,6 @@ urls: - https://huggingface.co/rhysjones/phi-2-orange - https://huggingface.co/TheBloke/phi-2-orange-GGUF - tags: - llm - gguf @@ -877,18 +865,15 @@ - filename: "phi-2-orange.Q4_0.gguf" sha256: "49cb710ae688e1b19b1b299087fa40765a0cd677e3afcc45e5f7ef6750975dcf" uri: "huggingface://TheBloke/phi-2-orange-GGUF/phi-2-orange.Q4_0.gguf" -### START Phi-3 - &phi-3 + ### START Phi-3 url: "github:mudler/LocalAI/gallery/phi-3-chat.yaml@master" name: "phi-3-mini-4k-instruct" license: mit - description: | The Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Mini version in two variants 4K and 128K which is the context length (in tokens) it can support. The model has underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization to ensure precise instruction adherence and robust safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3 Mini-4K-Instruct showcased a robust and state-of-the-art performance among models with less than 13 billion parameters. 
- urls: - https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf - tags: - llm - gguf @@ -902,22 +887,21 @@ - filename: "Phi-3-mini-4k-instruct-q4.gguf" sha256: "8a83c7fb9049a9b2e92266fa7ad04933bb53aa1e85136b7b30f1b8000ff2edef" uri: "huggingface://microsoft/Phi-3-mini-4k-instruct-gguf/Phi-3-mini-4k-instruct-q4.gguf" -- <<: *phi-3 +- !!merge <<: *phi-3 name: "phi-3-mini-4k-instruct:fp16" overrides: parameters: model: Phi-3-mini-4k-instruct-fp16.gguf files: - filename: "Phi-3-mini-4k-instruct-fp16.gguf" - sha256: "ad9f8ff11cd096115adc8ff50befa22fc3da2718672ddd2ab30faccd70488605" uri: "huggingface://microsoft/Phi-3-mini-4k-instruct-gguf/Phi-3-mini-4k-instruct-fp16.gguf" -### START Hermes + sha256: 5d99003e395775659b0dde3f941d88ff378b2837a8dc3a2ea94222ab1420fad3 - &hermes-2-pro-mistral + ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" name: "hermes-2-pro-mistral" icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ggO2sBDJ8Bhc6w-zwTx5j.png license: apache-2.0 - description: | Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house. @@ -928,10 +912,8 @@ This work was a collaboration between Nous Research, @interstellarninja, and Fireworks.AI Learn more about the function calling on our github repo here: https://github.com/NousResearch/Hermes-Function-Calling/tree/main - urls: - https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF - tags: - llm - gguf @@ -945,7 +927,7 @@ - filename: "Hermes-2-Pro-Mistral-7B.Q4_0.gguf" sha256: "f446c3125026f7af6757dd097dda02280adc85e908c058bd6f1c41a118354745" uri: "huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q4_0.gguf" -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral name: "hermes-2-pro-mistral:Q6_K" overrides: parameters: @@ -954,7 +936,7 @@ - filename: "Hermes-2-Pro-Mistral-7B.Q6_K.gguf" sha256: "40adc3b227bc36764de148fdda4df5df385adc06650d58d4dbe726ee0214eeff" uri: "huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q6_K.gguf" -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral name: "hermes-2-pro-mistral:Q8_0" overrides: parameters: @@ -964,7 +946,7 @@ sha256: "b6d95d7ec9a395b7568cc94b0447fd4f90b6f69d6e44794b1fbb84e3f732baca" uri: "huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q8_0.gguf" ### LLAMA3 version -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral name: "hermes-2-pro-llama-3-8b" tags: - llm @@ -981,7 +963,7 @@ - filename: "Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf" sha256: "10c52a4820137a35947927be741bb411a9200329367ce2590cc6757cd98e746c" uri: "huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf" -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral tags: - llm - gguf @@ -998,7 +980,7 @@ - filename: "Hermes-2-Pro-Llama-3-8B-Q5_K_M.gguf" sha256: "107f3f55e26b8cc144eadd83e5f8a60cfd61839c56088fa3ae2d5679abf45f29" uri: "huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q5_K_M.gguf" -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral tags: - llm - gguf @@ -1015,10 +997,10 @@ - filename: "Hermes-2-Pro-Llama-3-8B-Q8_0.gguf" sha256: "d138388cfda04d185a68eaf2396cf7a5cfa87d038a20896817a9b7cf1806f532" uri: 
"huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q8_0.gguf" -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral name: "biomistral-7b" description: | - BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains + BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains urls: - https://huggingface.co/MaziyarPanahi/BioMistral-7B-GGUF icon: https://huggingface.co/BioMistral/BioMistral-7B/resolve/main/wordart_blue_m_rectangle.png?download=true @@ -1029,7 +1011,7 @@ - filename: "BioMistral-7B.Q4_K_M.gguf" sha256: "3a73107045dfe7e3f113b392b0a67e3e6ca9fa9dae2abe301424ce5abd1721a6" uri: "huggingface://MaziyarPanahi/BioMistral-7B-GGUF/BioMistral-7B.Q4_K_M.gguf" -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral name: "tiamat-8b-1.2-llama-3-dpo" icon: https://huggingface.co/Gryphe/Tiamat-8b-1.2-Llama-3-DPO/resolve/main/Tiamat.png description: | @@ -1072,13 +1054,13 @@ - filename: guillaumetell-7b.Q4_K_M.gguf sha256: bf08db5281619335f3ee87e229c8533b04262790063b061bb8f275c3e4de7061 uri: huggingface://MaziyarPanahi/guillaumetell-7b-GGUF/guillaumetell-7b.Q4_K_M.gguf -- <<: *hermes-2-pro-mistral +- !!merge <<: *hermes-2-pro-mistral name: "kunocchini-7b-128k-test-imatrix" description: | - The following models were included in the merge: + The following models were included in the merge: - SanjiWatsuki/Kunoichi-DPO-v2-7B - Epiculous/Fett-uccine-Long-Noodle-7B-120k-Contex + SanjiWatsuki/Kunoichi-DPO-v2-7B + Epiculous/Fett-uccine-Long-Noodle-7B-120k-Contex urls: - https://huggingface.co/Lewdiculous/Kunocchini-7b-128k-test-GGUF-Imatrix icon: https://cdn-uploads.huggingface.co/production/uploads/642265bc01c62c1e4102dc36/9obNSalcJqCilQwr_4ssM.jpeg @@ -1110,19 +1092,16 @@ - filename: "galatolo-Q4_K.gguf" sha256: "ca0cfd5a9ad40dc16416aa3a277015d0299b62c0803b67f5709580042202c172" uri: "huggingface://galatolo/cerbero-7b-gguf/ggml-model-Q4_K.gguf" -### START Codellama - &codellama + ### START Codellama url: "github:mudler/LocalAI/gallery/codellama.yaml@master" name: "codellama-7b" license: llama2 - description: | Code Llama is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 34 billion parameters. This model is designed for general code synthesis and understanding. 
- urls: - https://huggingface.co/TheBloke/CodeLlama-7B-GGUF - https://huggingface.co/meta-llama/CodeLlama-7b-hf - tags: - llm - gguf @@ -1136,8 +1115,8 @@ - filename: "codellama-7b.Q4_0.gguf" sha256: "33052f6dd41436db2f83bd48017b6fff8ce0184e15a8a227368b4230f1da97b5" uri: "huggingface://TheBloke/CodeLlama-7B-GGUF/codellama-7b.Q4_0.gguf" -### START OpenVINO - &openvino + ### START OpenVINO url: "github:mudler/LocalAI/gallery/openvino.yaml@master" name: "openvino-llama-3-8b-instruct-ov-int8" license: llama3 @@ -1155,7 +1134,7 @@ - gpu - llama3 - cpu -- <<: *openvino +- !!merge <<: *openvino name: "openvino-phi3" urls: - https://huggingface.co/fakezeta/Phi-3-mini-128k-instruct-ov-int8 @@ -1173,7 +1152,7 @@ - phi3 - cpu - Remote Code Enabled -- <<: *openvino +- !!merge <<: *openvino name: "openvino-starling-lm-7b-beta-openvino-int8" urls: - https://huggingface.co/fakezeta/Starling-LM-7B-beta-openvino-int8 @@ -1187,7 +1166,7 @@ - gpu - mistral - cpu -- <<: *openvino +- !!merge <<: *openvino name: "openvino-wizardlm2" urls: - https://huggingface.co/fakezeta/Not-WizardLM-2-7B-ov-int8 @@ -1195,7 +1174,7 @@ context_size: 8192 parameters: model: fakezeta/Not-WizardLM-2-7B-ov-int8 -- <<: *openvino +- !!merge <<: *openvino name: "openvino-hermes2pro-llama3" urls: - https://huggingface.co/fakezeta/Hermes-2-Pro-Llama-3-8B-ov-int8 @@ -1209,7 +1188,7 @@ - gpu - llama3 - cpu -- <<: *openvino +- !!merge <<: *openvino name: "openvino-multilingual-e5-base" urls: - https://huggingface.co/intfloat/multilingual-e5-base @@ -1224,7 +1203,7 @@ - gpu - embedding - cpu -- <<: *openvino +- !!merge <<: *openvino name: "openvino-all-MiniLM-L6-v2" urls: - https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2 @@ -1239,10 +1218,10 @@ - gpu - embedding - cpu -### START Embeddings - &sentencentransformers + ### START Embeddings description: | - This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various tasks. Text is embedded in vector space such that similar text are closer and can efficiently be found using cosine similarity. + This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various tasks. Text is embedded in vector space such that similar text are closer and can efficiently be found using cosine similarity. urls: - https://github.com/UKPLab/sentence-transformers tags: @@ -1255,19 +1234,15 @@ overrides: parameters: model: all-MiniLM-L6-v2 - -### START Image generation - &dreamshaper + ### START Image generation name: dreamshaper icon: https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/dd9b038c-bd15-43ab-86ab-66e145ad7ff2/width=450/26072158-132340247-8k%20portrait%20of%20beautiful%20cyborg%20with%20brown%20hair,%20intricate,%20elegant,%20highly%20detailed,%20majestic,%20digital%20photography,%20art%20by%20artg_ed.jpeg license: other - description: | A text-to-image model that uses Stable Diffusion 1.5 to generate images from text prompts. This model is DreamShaper model by Lykon. 
- urls: - https://civitai.com/models/4384/dreamshaper - tags: - text-to-image - stablediffusion @@ -1282,7 +1257,6 @@ - filename: DreamShaper_8_pruned.safetensors uri: huggingface://Lykon/DreamShaper/DreamShaper_8_pruned.safetensors sha256: 879db523c30d3b9017143d56705015e15a2cb5628762c11d086fed9538abd7fd - ## Whisper - url: "github:mudler/LocalAI/gallery/whisper-base.yaml@master" name: "whisper-1" @@ -1290,10 +1264,8 @@ urls: - https://github.com/ggerganov/whisper.cpp - https://huggingface.co/ggerganov/whisper.cpp - description: | - Port of OpenAI's Whisper model in C/C++ - + Port of OpenAI's Whisper model in C/C++ ## Bert embeddings - url: "github:mudler/LocalAI/gallery/bert-embeddings.yaml@master" name: "bert-embeddings" @@ -1303,19 +1275,16 @@ tags: - embeddings description: | - Bert model that can be used for embeddings - + Bert model that can be used for embeddings ## Stable Diffusion - url: github:mudler/LocalAI/gallery/stablediffusion.yaml@master license: "BSD-3" urls: - https://github.com/EdVince/Stable-Diffusion-NCNN - https://github.com/EdVince/Stable-Diffusion-NCNN/blob/main/LICENSE - description: | - Stable Diffusion in NCNN with c++, supported txt2img and img2img + Stable Diffusion in NCNN with c++, supported txt2img and img2img name: stablediffusion-cpp - ## Tiny Dream - url: github:mudler/LocalAI/gallery/tinydream.yaml@master name: tinydream @@ -1323,34 +1292,30 @@ urls: - https://github.com/symisc/tiny-dream - https://github.com/symisc/tiny-dream/blob/main/LICENSE - description: | An embedded, Header Only, Stable Diffusion C++ implementation -## Piper TTS - &piper + ## Piper TTS url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-kathleen-low icon: https://github.com/rhasspy/piper/raw/master/etc/logo.png license: mit - urls: - https://github.com/rhasspy/piper - description: | A fast, local neural text to speech system that sounds great and is optimized for the Raspberry Pi 4. Piper is used in a variety of [projects](https://github.com/rhasspy/piper#people-using-piper). 
- tags: - tts - text-to-speech - cpu - overrides: parameters: model: en-us-kathleen-low.onnx files: - filename: voice-en-us-kathleen-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-kathleen-low.tar.gz -- <<: *piper + sha256: 18e32f009f864d8061af8a4be4ae9018b5aa8b49c37f9e108bbfd782c6a38fbf +- !!merge <<: *piper name: voice-ca-upc_ona-x-low overrides: parameters: @@ -1358,7 +1323,8 @@ files: - filename: voice-ca-upc_ona-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-ca-upc_ona-x-low.tar.gz -- <<: *piper + sha256: c750d3f6ad35c8d95d5b0d1ad30ede2525524e48390f70a0871bdb7980cc271e +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-ca-upc_pau-x-low overrides: @@ -1367,7 +1333,8 @@ files: - filename: voice-ca-upc_pau-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-ca-upc_pau-x-low.tar.gz -- <<: *piper + sha256: 13c658ecd46a2dbd9dadadf7100623e53106239afcc359f9e27511b91e642f1f +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-da-nst_talesyntese-medium overrides: @@ -1376,7 +1343,8 @@ files: - filename: voice-da-nst_talesyntese-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-da-nst_talesyntese-medium.tar.gz -- <<: *piper + sha256: 1bdf673b946a2ba69fab24ae3fc0e7d23e042c2533cbbef008f64f633500eb7e +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-de-eva_k-x-low overrides: @@ -1385,7 +1353,8 @@ files: - filename: voice-de-eva_k-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-de-eva_k-x-low.tar.gz -- <<: *piper + sha256: 81b305abc58a0a02629aea01904a86ec97b823714dd66b1ee22f38fe529e6371 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-de-karlsson-low overrides: @@ -1394,7 +1363,8 @@ files: - filename: voice-de-karlsson-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-de-karlsson-low.tar.gz -- <<: *piper + sha256: cc7615cfef3ee6beaa1db6059e0271e4d2e1d6d310c0e17b3d36c494628f4b82 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-de-kerstin-low overrides: @@ -1403,7 +1373,8 @@ files: - filename: voice-de-kerstin-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-de-kerstin-low.tar.gz -- <<: *piper + sha256: d8ea72fbc0c21db828e901777ba7bb5dff7c843bb943ad19f34c9700b96a8182 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-de-pavoque-low overrides: @@ -1412,7 +1383,8 @@ files: - filename: voice-de-pavoque-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-de-pavoque-low.tar.gz -- <<: *piper + sha256: 1f5ebc6398e8829f19c7c2b14f46307703bca0f0d8c74b4bb173037b1f161d4d +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-de-ramona-low overrides: @@ -1421,139 +1393,138 @@ files: - filename: voice-de-ramona-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-de-ramona-low.tar.gz -- <<: *piper + sha256: 66d9fc08d1a1c537a1cefe99a284f687e5ad7e43d5935a75390678331cce7b47 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-de-thorsten-low - overrides: parameters: model: de-thorsten-low.onnx files: - filename: voice-de-thorsten-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-de-thorsten-low.tar.gz -- <<: *piper + sha256: 
4d052a7726b77719d0dbc66c845f1d0fe4432bfbd26f878f6dd0883d49e9e43d +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-el-gr-rapunzelina-low - overrides: parameters: model: el-gr-rapunzelina-low.onnx files: - filename: voice-el-gr-rapunzelina-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-el-gr-rapunzelina-low.tar.gz -- <<: *piper + sha256: c5613688c12eabc5294465494ed56af1e0fe4d7896d216bfa470eb225d9ff0d0 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-gb-alan-low - overrides: parameters: model: en-gb-alan-low.onnx files: - filename: voice-en-gb-alan-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-gb-alan-low.tar.gz -- <<: *piper + sha256: 526eeeeccb26206dc92de5965615803b5bf88df059f46372caa4a9fa12d76a32 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-gb-southern_english_female-low - overrides: parameters: model: en-gb-southern_english files: - filename: voice-en-gb-southern_english_female-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-gb-southern_english_female-low.tar.gz -- <<: *piper + sha256: 7c1bbe23e61a57bdb450b137f69a83ff5358159262e1ed7d2308fa14f4924da9 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-amy-low - overrides: parameters: model: en-us-amy-low.onnx files: - filename: voice-en-us-amy-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz -- <<: *piper + sha256: 5c3e3480e7d71ce219943c8a711bb9c21fd48b8f8e87ed7fb5c6649135ab7608 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-danny-low - overrides: parameters: model: en-us-danny-low.onnx files: - filename: voice-en-us-danny-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-danny-low.tar.gz -- <<: *piper + sha256: 0c8fbb42526d5fbd3a0bded5f18041c0a893a70a7fb8756f97866624b932264b +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-kathleen-low - overrides: parameters: model: en-us-kathleen-low.onnx files: - filename: voice-en-us-kathleen-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-kathleen-low.tar.gz -- <<: *piper + sha256: 18e32f009f864d8061af8a4be4ae9018b5aa8b49c37f9e108bbfd782c6a38fbf +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-lessac-low - overrides: parameters: model: en-us-lessac-low.onnx files: - filename: voice-en-us-lessac-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-lessac-low.tar.gz -- <<: *piper + sha256: 003fe040985d00b917ace21b2ccca344c282c53fe9b946991b7b0da52516e1fc +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-lessac-medium - overrides: parameters: model: en-us-lessac-medium.onnx files: - filename: voice-en-us-lessac-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-lessac-medium.tar.gz -- <<: *piper + sha256: d45ca50084c0558eb9581cd7d26938043bc8853513da47c63b94d95a2367a5c9 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-libritts-high - overrides: parameters: model: en-us-libritts-high.onnx files: - filename: voice-en-us-libritts-high.tar.gz uri: 
https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-libritts-high.tar.gz -- <<: *piper + sha256: 328e3e9cb573a43a6c5e1aeca386e971232bdb1418a74d4674cf726c973a0ea8 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-ryan-high - overrides: parameters: model: en-us-ryan-high.onnx files: - filename: voice-en-us-ryan-high.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-ryan-high.tar.gz -- <<: *piper + sha256: de346b054703a190782f49acb9b93c50678a884fede49cfd85429d204802d678 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-ryan-low - overrides: parameters: model: en-us-ryan-low.onnx files: - filename: voice-en-us-ryan-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-ryan-low.tar.gz - -- <<: *piper + sha256: 049e6e5bad07870fb1d25ecde97bac00f9c95c90589b2fef4b0fbf23c88770ce +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us-ryan-medium - overrides: parameters: model: en-us-ryan-medium.onnx files: - filename: voice-en-us-ryan-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-ryan-medium.tar.gz - -- <<: *piper + sha256: 2e00d747eaed6ce9f63f4991921ef3bb2bbfbc7f28cde4f14eb7048960f928d8 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-en-us_lessac overrides: @@ -1562,8 +1533,8 @@ files: - filename: voice-en-us_lessac.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us_lessac.tar.gz - -- <<: *piper + sha256: 0967af67fb0435aa509b0b794c0cb2cc57817ae8a5bff28cb8cd89ab6f5dcc3d +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-es-carlfm-x-low overrides: @@ -1572,355 +1543,324 @@ files: - filename: voice-es-carlfm-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-es-carlfm-x-low.tar.gz - -- <<: *piper + sha256: 0156a186de321639e6295521f667758ad086bc8433f0a6797a9f044ed5cf5bf3 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-es-mls_10246-low - overrides: parameters: model: es-mls_10246-low.onnx files: - filename: voice-es-mls_10246-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-es-mls_10246-low.tar.gz - -- <<: *piper + sha256: ff1fe3fc2ab91e32acd4fa8cb92048e3cff0e20079b9d81324f01cd2dea50598 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-es-mls_9972-low - overrides: parameters: model: es-mls_9972-low.onnx files: - filename: voice-es-mls_9972-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-es-mls_9972-low.tar.gz - -- <<: *piper + sha256: d95def9adea97a6a3fee7645d1167e00fb4fd60f8ce9bc3ebf1acaa9e3f455dc +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-fi-harri-low - overrides: parameters: model: fi-harri-low.onnx files: - filename: voice-fi-harri-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-fi-harri-low.tar.gz - -- <<: *piper + sha256: 4f1aaf00927d0eb25bf4fc5ef8be2f042e048593864ac263ee7b49c516832b22 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-fr-gilles-low - overrides: parameters: model: fr-gilles-low.onnx files: - filename: voice-fr-gilles-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-fr-gilles-low.tar.gz - -- <<: *piper + sha256: 
77662c7332c2a6f522ab478287d9b0fe9afc11a2da71f310bf923124ee699aae +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-fr-mls_1840-low - overrides: parameters: model: fr-mls_1840-low.onnx files: - filename: voice-fr-mls_1840-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-fr-mls_1840-low.tar.gz - -- <<: *piper + sha256: 69169d1fac99a733112c08c7caabf457055990590a32ee83ebcada37f86132d3 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-fr-siwis-low - overrides: parameters: model: fr-siwis-low.onnx files: - filename: voice-fr-siwis-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-fr-siwis-low.tar.gz - -- <<: *piper + sha256: d3db8d47053e9b4108e1c1d29d5ea2b5b1a152183616c3134c222110ccde20f2 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-fr-siwis-medium - overrides: parameters: model: fr-siwis-medium.onnx files: - filename: voice-fr-siwis-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-fr-siwis-medium.tar.gz - -- <<: *piper + sha256: 0c9ecdf9ecac6de4a46be85a162bffe0db7145bd3a4175831cea6cab4b41eefd +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-is-bui-medium - overrides: parameters: model: is-bui-medium.onnx files: - filename: voice-is-bui-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-is-bui-medium.tar.gz - -- <<: *piper + sha256: e89ef01051cb48ca2a32338ed8749a4c966b912bb572c61d6d21f2d3822e505f +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-is-salka-medium - overrides: parameters: model: is-salka-medium.onnx files: - filename: voice-is-salka-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-is-salka-medium.tar.gz - -- <<: *piper + sha256: 75923d7d6b4125166ca58ec82b5d23879012844483b428db9911e034e6626384 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-is-steinn-medium - overrides: parameters: model: is-steinn-medium.onnx files: - filename: voice-is-steinn-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-is-steinn-medium.tar.gz - -- <<: *piper + sha256: 5a01a8df796f86fdfe12cc32a3412ebd83670d47708d94d926ba5ed0776e6dc9 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-is-ugla-medium - overrides: parameters: model: is-ugla-medium.onnx files: - filename: voice-is-ugla-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-is-ugla-medium.tar.gz - -- <<: *piper + sha256: 501cd0376f7fd397f394856b7b3d899da4cc40a63e11912258b74da78af90547 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-it-riccardo_fasol-x-low - overrides: parameters: model: it-riccardo_fasol-x-low.onnx files: - filename: voice-it-riccardo_fasol-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-it-riccardo_fasol-x-low.tar.gz - -- <<: *piper + sha256: 394b27b8780f5167e73a62ac103839cc438abc7edb544192f965e5b8f5f4acdb +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-kk-iseke-x-low - overrides: parameters: model: kk-iseke-x-low.onnx files: - filename: voice-kk-iseke-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-kk-iseke-x-low.tar.gz - -- <<: *piper + sha256: 
f434fffbea3e6d8cf392e44438a1f32a5d005fc93b41be84a6d663882ce7c074 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-kk-issai-high - overrides: parameters: model: kk-issai-high.onnx files: - filename: voice-kk-issai-high.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-kk-issai-high.tar.gz - -- <<: *piper + sha256: 84bf79d330d6cd68103e82d95bbcaa2628a99a565126dea94cea2be944ed4f32 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-kk-raya-x-low - overrides: parameters: model: kk-raya-x-low.onnx files: - filename: voice-kk-raya-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-kk-raya-x-low.tar.gz - -- <<: *piper + sha256: 4cab4ce00c6f10450b668072d7980a2bc3ade3a39adee82e3ec4f519d4c57bd1 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-ne-google-medium - overrides: parameters: model: ne-google-medium.onnx files: - filename: voice-ne-google-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-ne-google-medium.tar.gz - -- <<: *piper + sha256: 0895b11a7a340baea37fb9c27fb50bc3fd0af9779085978277f962d236d3a7bd +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-ne-google-x-low - overrides: parameters: model: ne-google-x-low.onnx files: - filename: voice-ne-google-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-ne-google-x-low.tar.gz - -- <<: *piper + sha256: 870ba5718dfe3e478c6cce8a9a288b591b3575c750b57ffcd845e4ec64988f0b +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-nl-mls_5809-low - overrides: parameters: model: nl-mls_5809-low.onnx files: - filename: voice-nl-mls_5809-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-nl-mls_5809-low.tar.gz - -- <<: *piper + sha256: 398b9f0318dfe9d613cb066444efec0d8491905ae34cf502edb52030b75ef51c +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-nl-mls_7432-low - overrides: parameters: model: nl-mls_7432-low.onnx files: - filename: voice-nl-mls_7432-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-nl-mls_7432-low.tar.gz - -- <<: *piper + sha256: 0b3efc68ea7e735ba8f2e0a0f7e9b4b887b00f6530c02fca4aa69a6091adbe5e +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-nl-nathalie-x-low - overrides: parameters: model: nl-nathalie-x-low.onnx files: - filename: voice-nl-nathalie-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-nl-nathalie-x-low.tar.gz - -- <<: *piper + sha256: 2658d4fe2b791491780160216d187751f7c993aa261f3b8ec76dfcaf1ba74942 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-nl-rdh-medium - overrides: parameters: model: nl-rdh-medium.onnx files: - filename: voice-nl-rdh-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-nl-rdh-medium.tar.gz - -- <<: *piper + sha256: 16f74a195ecf13df1303fd85327532196cc1ecef2e72505200578fd410d0affb +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-nl-rdh-x-low - overrides: parameters: model: nl-rdh-x-low.onnx files: - filename: voice-nl-rdh-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-nl-rdh-x-low.tar.gz - -- <<: *piper + sha256: 496363e5d6e080fd16ac5a1f9457c564b52f0ee8be7f2e2ba1dbf41ef0b23a39 +- !!merge <<: *piper url: 
github:mudler/LocalAI/gallery/piper.yaml@master name: voice-no-talesyntese-medium - overrides: parameters: model: no-talesyntese-medium.onnx files: - filename: voice-no-talesyntese-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-no-talesyntese-medium.tar.gz - -- <<: *piper + sha256: ed6b3593a0e70c90d52e225b85d7e0b805ad8e08482471bd2f73cf1404a6470d +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-pl-mls_6892-low - overrides: parameters: model: pl-mls_6892-low.onnx files: - filename: voice-pl-mls_6892-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-pl-mls_6892-low.tar.gz - -- <<: *piper + sha256: 5361fcf586b1285025a2ccb8b7500e07c9d66fa8126ef518709c0055c4c0d6f4 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-pt-br-edresson-low - overrides: parameters: model: pt-br-edresson-low.onnx files: - filename: voice-pt-br-edresson-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-pt-br-edresson-low.tar.gz - -- <<: *piper + sha256: c68be522a526e77f49e90eeb4c13c01b4acdfeb635759f0eeb0eea8f16fd1f33 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-ru-irinia-medium - overrides: parameters: model: ru-irinia-medium.onnx files: - filename: voice-ru-irinia-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-ru-irinia-medium.tar.gz - -- <<: *piper + sha256: 897b62f170faee38f21d0bc36411164166ae351977e898b6cf33f6206890b55f +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-sv-se-nst-medium - overrides: parameters: model: sv-se-nst-medium.onnx files: - filename: voice-sv-se-nst-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-sv-se-nst-medium.tar.gz - -- <<: *piper + sha256: 0d6cf357d55860162bf1bdd76bd4f0c396ff547e941bfb25df799d6f1866fda9 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-uk-lada-x-low - overrides: parameters: model: uk-lada-x-low.onnx files: - filename: voice-uk-lada-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-uk-lada-x-low.tar.gz - -- <<: *piper + sha256: ff50acbd659fc226b57632acb1cee310009821ec44b4bc517effdd9827d8296b +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-vi-25hours-single-low - overrides: parameters: model: vi-25hours-single-low.onnx files: - filename: voice-vi-25hours-single-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-vi-25hours-single-low.tar.gz - -- <<: *piper + sha256: 97e34d1b69dc7000a4ec3269f84339ed35905b3c9800a63da5d39b7649e4a666 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-vi-vivos-x-low - overrides: parameters: model: vi-vivos-x-low.onnx files: - filename: voice-vi-vivos-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-vi-vivos-x-low.tar.gz - -- <<: *piper + sha256: 07cd4ca6438ec224012f7033eec1a2038724b78e4aa2bedf85f756656b52e1a7 +- !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-zh-cn-huayan-x-low - overrides: parameters: model: zh-cn-huayan-x-low.onnx files: - filename: voice-zh-cn-huayan-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-zh-cn-huayan-x-low.tar.gz - -- <<: *piper + sha256: 609db0da8ee75beb2f17ce53c55abdbc8c0e04135482efedf1798b1938bf90fa +- !!merge <<: *piper url: 
github:mudler/LocalAI/gallery/piper.yaml@master name: voice-zh_CN-huayan-medium - overrides: parameters: model: zh_CN-huayan-medium.onnx files: - filename: voice-zh_CN-huayan-medium.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-zh_CN-huayan-medium.tar.gz + sha256: 0299a5e7f481ba853404e9f0e1515a94d5409585d76963fa4d30c64bd630aa99
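---

For reviewers who want to sanity-check the updated checksums locally, below is an illustrative sketch (not part of the patch and not LocalAI's own verification code) of how the `sha256` fields added in `gallery/index.yaml` could be checked against a downloaded model file. It assumes PyYAML is installed; `yaml.safe_load` resolves YAML `<<` merge keys, and the explicit `!!merge` tag this patch adds is the standard tag for that key, so the index should load the same way. The helper names (`expected_checksums`, `sha256_of`) are invented for this example.

```python
# Illustrative only: compare a downloaded gallery file against the sha256
# recorded for it in gallery/index.yaml. Helper names are made up here.
import hashlib
import sys

import yaml  # PyYAML; safe_load flattens "<<" / !!merge keys used by the gallery


def expected_checksums(index_path: str) -> dict:
    """Map each gallery filename to the sha256 declared in index.yaml."""
    with open(index_path, "r", encoding="utf-8") as fh:
        entries = yaml.safe_load(fh)  # top level is a list of model entries
    checksums = {}
    for entry in entries:
        for f in entry.get("files") or []:
            if "filename" in f and "sha256" in f:
                checksums[f["filename"]] = f["sha256"]
    return checksums


def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash a (possibly multi-GB) file in chunks instead of reading it whole."""
    h = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()


if __name__ == "__main__":
    index_file, model_file = sys.argv[1], sys.argv[2]
    want = expected_checksums(index_file).get(model_file.split("/")[-1])
    got = sha256_of(model_file)
    print("OK" if want == got else f"MISMATCH: expected {want}, got {got}")
```

Usage would look like `python verify.py gallery/index.yaml ./models/Meta-Llama-3-8B-Instruct.Q4_0.gguf`; a mismatch against the values in this patch would indicate either a stale download or a further upstream re-upload of the quantized file.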