diff --git a/.vscode/settings.json b/.vscode/settings.json index 18a1498..7df0d5e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -32,6 +32,7 @@ "HEUN", "hhmmss", "Hmmss", + "inferencing", "inpainting", "isnot", "jakemorrison", diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index a36a9b5..109c623 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.2.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [0.22.0] - **BREAKING CHANGES** +## [0.26.0] - **BREAKING CHANGES** - Module changes: - stability.ai @@ -22,7 +22,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Cohere - `Invoke-CohereCommandRModel` - minor corrections to debug output - Meta - - `Invoke-MetaModel` - minor corrections to debug output and help + - `Invoke-MetaModel` + - Added support for new 3.2 models: `meta.llama3-2-1b-instruct-v1:0`, `meta.llama3-2-3b-instruct-v1:0`, `meta.llama3-2-11b-instruct-v1:0`, `meta.llama3-2-90b-instruct-v1:0` + - minor corrections to debug output and help - Mistral - `Invoke-MistralAIModel` - minor corrections to debug output and help. Adjusted Max token limit validation. 
- Build changes: diff --git a/docs/Invoke-MetaModel.md b/docs/Invoke-MetaModel.md index 02d06b6..02fed0d 100644 --- a/docs/Invoke-MetaModel.md +++ b/docs/Invoke-MetaModel.md @@ -376,3 +376,11 @@ Author: Jake Morrison - @jakemorrison - https://www.techthoughts.info/ [https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/](https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/) [https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md](https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md) + +[https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md) + +[https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md) + +[https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/vision_prompt_format.md](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/vision_prompt_format.md) + +[https://www.llama.com/docs/how-to-guides/vision-capabilities/](https://www.llama.com/docs/how-to-guides/vision-capabilities/) diff --git a/docs/pwshBedrock.md b/docs/pwshBedrock.md index f084513..fa2ba1f 100644 --- a/docs/pwshBedrock.md +++ b/docs/pwshBedrock.md @@ -2,7 +2,7 @@ Module Name: pwshBedrock Module Guid: b4f9e4dc-0229-44ef-99a1-08be4c5e81f2 Download Help Link: NA -Help Version: 0.22.0 +Help Version: 0.26.0 Locale: en-US --- diff --git a/src/Tests/Integration/ConverseAPI.Tests.ps1 b/src/Tests/Integration/ConverseAPI.Tests.ps1 index 6500960..156bee6 100644 --- a/src/Tests/Integration/ConverseAPI.Tests.ps1 +++ b/src/Tests/Integration/ConverseAPI.Tests.ps1 @@ -37,6 +37,10 @@ InModuleScope 'pwshBedrock' { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 
'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', diff --git a/src/Tests/Integration/SupportedModels-Checks.Tests.ps1 b/src/Tests/Integration/SupportedModels-Checks.Tests.ps1 index d089e77..e7b0ce7 100644 --- a/src/Tests/Integration/SupportedModels-Checks.Tests.ps1 +++ b/src/Tests/Integration/SupportedModels-Checks.Tests.ps1 @@ -32,6 +32,10 @@ Describe 'Supported Models Checks' -Tag Integration { 'meta.llama3-1-8b-instruct-v1:0' 'meta.llama3-1-70b-instruct-v1:0' 'meta.llama3-1-405b-instruct-v1:0' + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2' 'mistral.mistral-large-2402-v1:0' 'mistral.mistral-large-2407-v1:0', diff --git a/src/Tests/Unit/Public/Get-ModelContext.Tests.ps1 b/src/Tests/Unit/Public/Get-ModelContext.Tests.ps1 index d86bf67..1f79a26 100644 --- a/src/Tests/Unit/Public/Get-ModelContext.Tests.ps1 +++ b/src/Tests/Unit/Public/Get-ModelContext.Tests.ps1 @@ -167,6 +167,22 @@ InModuleScope 'pwshBedrock' { ModelId = 'meta.llama3-1-405b-instruct-v1:0' Context = 'test' } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-1b-instruct-v1:0' + Context = 'test' + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-3b-instruct-v1:0' + Context = 'test' + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-11b-instruct-v1:0' + Context = 'test' + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-90b-instruct-v1:0' + Context = 'test' + } [PSCustomObject]@{ ModelId = 'mistral.mistral-7b-instruct-v0:2' Context = 'test' diff --git a/src/Tests/Unit/Public/Get-ModelTally.Tests.ps1 b/src/Tests/Unit/Public/Get-ModelTally.Tests.ps1 index 68ae2e2..ac6b3ff 100644 --- a/src/Tests/Unit/Public/Get-ModelTally.Tests.ps1 +++ b/src/Tests/Unit/Public/Get-ModelTally.Tests.ps1 @@ -43,7 +43,7 @@ InModuleScope 'pwshBedrock' { It 'should get the 
tally for all models' { $eval = Get-ModelTally -AllModels - $eval.Count | Should -BeExactly 35 + $eval.Count | Should -BeExactly 39 foreach ($model in $eval) { if ($null -ne $model.ImageCount) { $model.ImageCount | Should -BeExactly 0 diff --git a/src/Tests/Unit/Public/Reset-ModelContext.Tests.ps1 b/src/Tests/Unit/Public/Reset-ModelContext.Tests.ps1 index 7b94f5b..25ccce0 100644 --- a/src/Tests/Unit/Public/Reset-ModelContext.Tests.ps1 +++ b/src/Tests/Unit/Public/Reset-ModelContext.Tests.ps1 @@ -401,6 +401,62 @@ User: "Hi there! } ) } + [PSCustomObject]@{ + ModelID = 'meta.llama3-2-1b-instruct-v1:0' + Context = @( + [PSCustomObject]@{ + role = 'user' + content = @( + [PSCustomObject]@{ + type = 'text' + text = 'Llama3 2 1b instruct v1 context' + } + ) + } + ) + } + [PSCustomObject]@{ + ModelID = 'meta.llama3-2-3b-instruct-v1:0' + Context = @( + [PSCustomObject]@{ + role = 'user' + content = @( + [PSCustomObject]@{ + type = 'text' + text = 'Llama3 2 3b instruct v1 context' + } + ) + } + ) + } + [PSCustomObject]@{ + ModelID = 'meta.llama3-2-11b-instruct-v1:0' + Context = @( + [PSCustomObject]@{ + role = 'user' + content = @( + [PSCustomObject]@{ + type = 'text' + text = 'Llama3 2 11b instruct v1 context' + } + ) + } + ) + } + [PSCustomObject]@{ + ModelID = 'meta.llama3-2-90b-instruct-v1:0' + Context = @( + [PSCustomObject]@{ + role = 'user' + content = @( + [PSCustomObject]@{ + type = 'text' + text = 'Llama3 2 90b instruct v1 context' + } + ) + } + ) + } [PSCustomObject]@{ ModelID = 'mistral.mistral-7b-instruct-v0:2' Context = @( diff --git a/src/pwshBedrock/Imports.ps1 b/src/pwshBedrock/Imports.ps1 index b9efca5..7291064 100644 --- a/src/pwshBedrock/Imports.ps1 +++ b/src/pwshBedrock/Imports.ps1 @@ -211,6 +211,38 @@ $Global:pwshBedRockSessionModelTally = @( InputTokenCost = 0 OutputTokenCost = 0 } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-1b-instruct-v1:0' + TotalCost = 0 + InputTokenCount = 0 + OutputTokenCount = 0 + InputTokenCost = 0 + OutputTokenCost = 0 + } 
+ [PSCustomObject]@{ + ModelId = 'meta.llama3-2-3b-instruct-v1:0' + TotalCost = 0 + InputTokenCount = 0 + OutputTokenCount = 0 + InputTokenCost = 0 + OutputTokenCost = 0 + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-11b-instruct-v1:0' + TotalCost = 0 + InputTokenCount = 0 + OutputTokenCount = 0 + InputTokenCost = 0 + OutputTokenCost = 0 + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-90b-instruct-v1:0' + TotalCost = 0 + InputTokenCount = 0 + OutputTokenCount = 0 + InputTokenCost = 0 + OutputTokenCost = 0 + } [PSCustomObject]@{ ModelId = 'mistral.mistral-7b-instruct-v0:2' TotalCost = 0 @@ -382,6 +414,22 @@ $Global:pwshBedrockModelContext = @( ModelId = 'meta.llama3-1-405b-instruct-v1:0' Context = '' } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-1b-instruct-v1:0' + Context = '' + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-3b-instruct-v1:0' + Context = '' + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-11b-instruct-v1:0' + Context = '' + } + [PSCustomObject]@{ + ModelId = 'meta.llama3-2-90b-instruct-v1:0' + Context = '' + } [PSCustomObject]@{ ModelId = 'mistral.mistral-7b-instruct-v0:2' Context = '' @@ -1039,6 +1087,90 @@ $script:metaModelInfo = @( InputTokenCost = 0.00532 OutputTokenCost = 0.016 } + [PSCustomObject]@{ + ProviderName = 'Meta' + ModelName = 'Llama 3.2 1B Instruct' + ModelId = 'meta.llama3-2-1b-instruct-v1:0' + Description = 'The most lightweight model in the Llama 3.2 collection of models, perfect for retrieval and summarization for edge devices and mobile applications.' + Strength = 'ideal for the following use cases: personal information management and multilingual knowledge retrieval.' 
+ Multilingual = $true + Text = $true + Document = $true + Vision = $false + SystemPrompt = $true + ToolUse = $false + ResponseStreamingSupported = $true + ChatHistorySupported = $true + ContextWindow = 128000 + MaxOutput = 4096 + TrainingCutoff = '12-01-2023' + PayloadLimit = '' + InputTokenCost = 0.0001 + OutputTokenCost = 0.0001 + } + [PSCustomObject]@{ + ProviderName = 'Meta' + ModelName = 'Llama 3.2 3B Instruct' + ModelId = 'meta.llama3-2-3b-instruct-v1:0' + Description = 'Designed for applications requiring low-latency inferencing and limited computational resources.' + Strength = 'excels at text summarization, classification, and language translation tasks. This model is ideal for the following use cases: mobile AI-powered writing assistants and customer service applications.' + Multilingual = $true + Text = $true + Document = $true + Vision = $false + SystemPrompt = $true + ToolUse = $false + ResponseStreamingSupported = $true + ChatHistorySupported = $true + ContextWindow = 128000 + MaxOutput = 4096 + TrainingCutoff = '12-01-2023' + PayloadLimit = '' + InputTokenCost = 0.00015 + OutputTokenCost = 0.00015 + } + [PSCustomObject]@{ + ProviderName = 'Meta' + ModelName = 'Llama 3.2 11B Instruct' + ModelId = 'meta.llama3-2-11b-instruct-v1:0' + Description = 'Well-suited for content creation, conversational AI, language understanding, and enterprise applications requiring visual reasoning.' + Strength = 'The model demonstrates strong performance in text summarization, sentiment analysis, code generation, and following instructions, with the added ability to reason about images. This model''s use cases are similar to those of the 90B version: image captioning, image-text retrieval, visual grounding, visual question answering and visual reasoning, and document visual question answering.'
+ Multilingual = $true + Text = $true + Document = $true + Vision = $true + SystemPrompt = $true + ToolUse = $false + ResponseStreamingSupported = $true + ChatHistorySupported = $true + ContextWindow = 128000 + MaxOutput = 4096 + TrainingCutoff = '12-01-2023' + PayloadLimit = '' + InputTokenCost = 0.00035 + OutputTokenCost = 0.00035 + } + [PSCustomObject]@{ + ProviderName = 'Meta' + ModelName = 'Llama 3.2 90B Instruct' + ModelId = 'meta.llama3-2-90b-instruct-v1:0' + Description = "Meta's most advanced model, ideal for enterprise-level applications." + Strength = 'excels at general knowledge, long-form text generation, multilingual translation, coding, math, and advanced reasoning. It also introduces image reasoning capabilities, allowing for image understanding and visual reasoning tasks. This model is ideal for the following use cases: image captioning, image-text retrieval, visual grounding, visual question answering and visual reasoning, and document visual question answering.' + Multilingual = $true + Text = $true + Document = $true + Vision = $true + SystemPrompt = $true + ToolUse = $false + ResponseStreamingSupported = $true + ChatHistorySupported = $true + ContextWindow = 128000 + MaxOutput = 4096 + TrainingCutoff = '12-01-2023' + PayloadLimit = '' + InputTokenCost = 0.002 + OutputTokenCost = 0.002 + } ) #metaModelInfo #endregion diff --git a/src/pwshBedrock/Private/Add-ModelCostEstimate.ps1 b/src/pwshBedrock/Private/Add-ModelCostEstimate.ps1 index 0e2d88f..e9c7371 100644 --- a/src/pwshBedrock/Private/Add-ModelCostEstimate.ps1 +++ b/src/pwshBedrock/Private/Add-ModelCostEstimate.ps1 @@ -80,6 +80,10 @@ function Add-ModelCostEstimate { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 
'mistral.mistral-large-2407-v1:0', @@ -208,6 +212,22 @@ function Add-ModelCostEstimate { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } + 'meta.llama3-2-1b-instruct-v1:0' { + $inputTokenCount = $Usage.prompt_token_count + $outputTokenCount = $Usage.generation_token_count + } + 'meta.llama3-2-3b-instruct-v1:0' { + $inputTokenCount = $Usage.prompt_token_count + $outputTokenCount = $Usage.generation_token_count + } + 'meta.llama3-2-11b-instruct-v1:0' { + $inputTokenCount = $Usage.prompt_token_count + $outputTokenCount = $Usage.generation_token_count + } + 'meta.llama3-2-90b-instruct-v1:0' { + $inputTokenCount = $Usage.prompt_token_count + $outputTokenCount = $Usage.generation_token_count + } 'mistral.mistral-7b-instruct-v0:2' { $inputTokenCount = Get-TokenCountEstimate -Text $Message $outputTokenCount = Get-TokenCountEstimate -Text $Usage.outputs.text diff --git a/src/pwshBedrock/Private/Format-MetaTextMessage.ps1 b/src/pwshBedrock/Private/Format-MetaTextMessage.ps1 index 9df128e..56f8b1f 100644 --- a/src/pwshBedrock/Private/Format-MetaTextMessage.ps1 +++ b/src/pwshBedrock/Private/Format-MetaTextMessage.ps1 @@ -60,7 +60,11 @@ function Format-MetaTextMessage { 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', - 'meta.llama3-1-405b-instruct-v1:0' + 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0' )] [string]$ModelID, diff --git a/src/pwshBedrock/Public/Get-ModelContext.ps1 b/src/pwshBedrock/Public/Get-ModelContext.ps1 index 6fa1215..ca051f4 100644 --- a/src/pwshBedrock/Public/Get-ModelContext.ps1 +++ b/src/pwshBedrock/Public/Get-ModelContext.ps1 @@ -60,6 +60,10 @@ function Get-ModelContext { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 
'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', diff --git a/src/pwshBedrock/Public/Get-ModelCostEstimate.ps1 b/src/pwshBedrock/Public/Get-ModelCostEstimate.ps1 index 42deaa5..1873c4f 100644 --- a/src/pwshBedrock/Public/Get-ModelCostEstimate.ps1 +++ b/src/pwshBedrock/Public/Get-ModelCostEstimate.ps1 @@ -92,6 +92,10 @@ function Get-ModelCostEstimate { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', diff --git a/src/pwshBedrock/Public/Get-ModelInfo.ps1 b/src/pwshBedrock/Public/Get-ModelInfo.ps1 index 5530521..9d6a3eb 100644 --- a/src/pwshBedrock/Public/Get-ModelInfo.ps1 +++ b/src/pwshBedrock/Public/Get-ModelInfo.ps1 @@ -69,6 +69,10 @@ function Get-ModelInfo { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', diff --git a/src/pwshBedrock/Public/Get-ModelTally.ps1 b/src/pwshBedrock/Public/Get-ModelTally.ps1 index 6170319..ebea044 100644 --- a/src/pwshBedrock/Public/Get-ModelTally.ps1 +++ b/src/pwshBedrock/Public/Get-ModelTally.ps1 @@ -72,6 +72,10 @@ function Get-ModelTally { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 
'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', diff --git a/src/pwshBedrock/Public/Invoke-ConverseAPI.ps1 b/src/pwshBedrock/Public/Invoke-ConverseAPI.ps1 index 758d362..3eb6f2a 100644 --- a/src/pwshBedrock/Public/Invoke-ConverseAPI.ps1 +++ b/src/pwshBedrock/Public/Invoke-ConverseAPI.ps1 @@ -278,6 +278,10 @@ function Invoke-ConverseAPI { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', diff --git a/src/pwshBedrock/Public/Invoke-MetaModel.ps1 b/src/pwshBedrock/Public/Invoke-MetaModel.ps1 index 4576127..c8cde31 100644 --- a/src/pwshBedrock/Public/Invoke-MetaModel.ps1 +++ b/src/pwshBedrock/Public/Invoke-MetaModel.ps1 @@ -97,6 +97,14 @@ https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/ .LINK https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md +.LINK + https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD.md +.LINK + https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/MODEL_CARD_VISION.md +.LINK + https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/vision_prompt_format.md +.LINK + https://www.llama.com/docs/how-to-guides/vision-capabilities/ #> function Invoke-MetaModel { [CmdletBinding()] @@ -119,7 +127,11 @@ function Invoke-MetaModel { 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', - 'meta.llama3-1-405b-instruct-v1:0' + 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0' )] [string]$ModelID, diff --git 
a/src/pwshBedrock/Public/Reset-ModelContext.ps1 b/src/pwshBedrock/Public/Reset-ModelContext.ps1 index 8bb4e9a..694f5ab 100644 --- a/src/pwshBedrock/Public/Reset-ModelContext.ps1 +++ b/src/pwshBedrock/Public/Reset-ModelContext.ps1 @@ -66,6 +66,10 @@ function Reset-ModelContext { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', @@ -146,6 +150,11 @@ function Reset-ModelContext { $model -eq 'meta.llama3-70b-instruct-v1:0' -or $model -eq 'meta.llama3-1-8b-instruct-v1:0' -or $model -eq 'meta.llama3-1-70b-instruct-v1:0' -or + $model -eq 'meta.llama3-1-405b-instruct-v1:0' -or + $model -eq 'meta.llama3-2-1b-instruct-v1:0' -or + $model -eq 'meta.llama3-2-3b-instruct-v1:0' -or + $model -eq 'meta.llama3-2-11b-instruct-v1:0' -or + $model -eq 'meta.llama3-2-90b-instruct-v1:0' -or $model -eq 'mistral.mistral-7b-instruct-v0:2' -or $model -eq 'mistral.mixtral-8x7b-instruct-v0:1' -or $model -eq 'mistral.mistral-large-2402-v1:0' -or diff --git a/src/pwshBedrock/Public/Reset-ModelTally.ps1 b/src/pwshBedrock/Public/Reset-ModelTally.ps1 index b17aa11..ff2bc60 100644 --- a/src/pwshBedrock/Public/Reset-ModelTally.ps1 +++ b/src/pwshBedrock/Public/Reset-ModelTally.ps1 @@ -70,6 +70,10 @@ function Reset-ModelTally { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', diff --git a/src/pwshBedrock/Public/Save-ModelContext.ps1 b/src/pwshBedrock/Public/Save-ModelContext.ps1 index 
b049fe3..ff88bad 100644 --- a/src/pwshBedrock/Public/Save-ModelContext.ps1 +++ b/src/pwshBedrock/Public/Save-ModelContext.ps1 @@ -58,6 +58,10 @@ function Save-ModelContext { 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'meta.llama3-1-405b-instruct-v1:0', + 'meta.llama3-2-1b-instruct-v1:0', + 'meta.llama3-2-3b-instruct-v1:0', + 'meta.llama3-2-11b-instruct-v1:0', + 'meta.llama3-2-90b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', diff --git a/src/pwshBedrock/pwshBedrock.psd1 b/src/pwshBedrock/pwshBedrock.psd1 index c768480..9f7f6f1 100644 --- a/src/pwshBedrock/pwshBedrock.psd1 +++ b/src/pwshBedrock/pwshBedrock.psd1 @@ -12,7 +12,7 @@ RootModule = 'pwshBedrock.psm1' # Version number of this module. - ModuleVersion = '0.22.0' + ModuleVersion = '0.26.0' # Supported PSEditions # CompatiblePSEditions = @()