diff --git a/nvidia_models/intro/.env.example b/nvidia_models/intro/.env.example new file mode 100644 index 00000000..f3af9034 --- /dev/null +++ b/nvidia_models/intro/.env.example @@ -0,0 +1,2 @@ +NVIDIA_API_KEY= +MODEL=meta/llama-3.1-8b-instruct \ No newline at end of file diff --git a/nvidia_models/intro/.gitignore b/nvidia_models/intro/.gitignore new file mode 100644 index 00000000..ca67f274 --- /dev/null +++ b/nvidia_models/intro/.gitignore @@ -0,0 +1,6 @@ +.env +.DS_Store +__pycache__ +.venv +poetry.lock +.ruff_cache \ No newline at end of file diff --git a/nvidia_models/intro/Makefile b/nvidia_models/intro/Makefile new file mode 100644 index 00000000..6744bfe9 --- /dev/null +++ b/nvidia_models/intro/Makefile @@ -0,0 +1,40 @@ +.PHONY: all format lint test tests integration_tests help + +# Default target executed when no arguments are given to make. +all: help + +install: ## Install the poetry environment and dependencies + poetry install --no-root + +clean: ## Clean up cache directories and build artifacts + find . -type d -name "__pycache__" -exec rm -rf {} + + find . -type d -name "*.pyc" -exec rm -rf {} + + find . -type d -name ".ruff_cache" -exec rm -rf {} + + find . -type d -name ".pytest_cache" -exec rm -rf {} + + find . -type d -name ".coverage" -exec rm -rf {} + + rm -rf dist/ + rm -rf build/ + +###################### +# LINTING AND FORMATTING +###################### + +# Define a variable for Python and notebook files. +PYTHON_FILES=. 
+MYPY_CACHE=.mypy_cache +lint: ## Run code quality tools + poetry run ruff check $(PYTHON_FILES) + poetry run ruff format $(PYTHON_FILES) --check + +format: ## Format code using ruff + poetry run ruff format $(PYTHON_FILES) + poetry run ruff check $(PYTHON_FILES) --fix + +###################### +# HELP +###################### + +help: + @echo '----' + @echo 'format - run code formatters' + @echo 'lint - run linters' diff --git a/nvidia_models/intro/README.md b/nvidia_models/intro/README.md new file mode 100644 index 00000000..2cedbb56 --- /dev/null +++ b/nvidia_models/intro/README.md @@ -0,0 +1,15 @@ +# AI Crew using NVIDIA NIM Endpoint + +## Introduction +This is a simple example using the CrewAI framework with an NVIDIA endpoint and langchain-nvidia-ai-endpoints integration. + +## Running the Script +This example uses the NVIDIA NIM API endpoint to call a model. + +- **Configure Environment**: Set NVIDIA_API_KEY to an appropriate API key. + Set MODEL to select an appropriate model +- **Install Dependencies**: Run `make install`. +- **Execute the Script**: Run `python main.py` to kick off the example research crew. + +## Details & Explanation +- **Running the Script**: Execute `python main.py`. The script will leverage the CrewAI framework, backed by the NVIDIA NIM endpoint, to research and report on the next big trend in AI. 
\ No newline at end of file diff --git a/nvidia_models/intro/main.py b/nvidia_models/intro/main.py new file mode 100644 index 00000000..676d5ebb --- /dev/null +++ b/nvidia_models/intro/main.py @@ -0,0 +1,152 @@ +import logging +import os +from typing import Any, Dict, List, Optional, Union + +import litellm +from crewai import LLM, Agent, Crew, Process, Task +from crewai.utilities.exceptions.context_window_exceeding_exception import ( + LLMContextLengthExceededException, +) +from dotenv import load_dotenv +from langchain_nvidia_ai_endpoints import ChatNVIDIA + +load_dotenv() + + +class nvllm(LLM): + def __init__( + self, + llm: ChatNVIDIA, + model_str: str, + timeout: Optional[Union[float, int]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + n: Optional[int] = None, + stop: Optional[Union[str, List[str]]] = None, + max_completion_tokens: Optional[int] = None, + max_tokens: Optional[int] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + logit_bias: Optional[Dict[int, float]] = None, + response_format: Optional[Dict[str, Any]] = None, + seed: Optional[int] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + base_url: Optional[str] = None, + api_version: Optional[str] = None, + api_key: Optional[str] = None, + callbacks: List[Any] = None, + **kwargs, + ): + self.model = model_str + self.timeout = timeout + self.temperature = temperature + self.top_p = top_p + self.n = n + self.stop = stop + self.max_completion_tokens = max_completion_tokens + self.max_tokens = max_tokens + self.presence_penalty = presence_penalty + self.frequency_penalty = frequency_penalty + self.logit_bias = logit_bias + self.response_format = response_format + self.seed = seed + self.logprobs = logprobs + self.top_logprobs = top_logprobs + self.base_url = base_url + self.api_version = api_version + self.api_key = api_key + self.callbacks = callbacks + self.kwargs = kwargs + self.llm = 
llm + + if callbacks is None: + self.callbacks = callbacks = [] + + self.set_callbacks(callbacks) + + def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = None) -> str: + if callbacks is None: + callbacks = [] + if callbacks and len(callbacks) > 0: + self.set_callbacks(callbacks) + + try: + params = { + "model": self.llm.model, + "input": messages, + "timeout": self.timeout, + "temperature": self.temperature, + "top_p": self.top_p, + "n": self.n, + "stop": self.stop, + "max_tokens": self.max_tokens or self.max_completion_tokens, + "presence_penalty": self.presence_penalty, + "frequency_penalty": self.frequency_penalty, + "logit_bias": self.logit_bias, + "response_format": self.response_format, + "seed": self.seed, + "logprobs": self.logprobs, + "top_logprobs": self.top_logprobs, + "api_key": self.api_key, + **self.kwargs, + } + + response = self.llm.invoke(**params) + return response.content + except Exception as e: + if not LLMContextLengthExceededException(str(e))._is_context_limit_error( + str(e) + ): + logging.error(f"LiteLLM call failed: {str(e)}") + + raise # Re-raise the exception after logging + + def set_callbacks(self, callbacks: List[Any]): + callback_types = [type(callback) for callback in callbacks] + for callback in litellm.success_callback[:]: + if type(callback) in callback_types: + litellm.success_callback.remove(callback) + + for callback in litellm._async_success_callback[:]: + if type(callback) in callback_types: + litellm._async_success_callback.remove(callback) + + litellm.callbacks = callbacks + + +model = os.environ.get("MODEL", "meta/llama-3.1-8b-instruct") +llm = ChatNVIDIA(model=model) +default_llm = nvllm(model_str="nvidia_nim/" + model, llm=llm) + +os.environ["NVIDIA_NIM_API_KEY"] = os.environ.get("NVIDIA_API_KEY") + +# Create a researcher agent +researcher = Agent( + role="Senior Researcher", + goal="Discover groundbreaking technologies", + verbose=True, + llm=default_llm, + backstory=( + "A curious mind fascinated by 
cutting-edge innovation and the potential " + "to change the world, you know everything about tech." + ), +) + +# Task for the researcher +research_task = Task( + description="Identify the next big trend in AI", + agent=researcher, # Assigning the task to the researcher + expected_output="Data Insights", +) + + +# Instantiate your crew +tech_crew = Crew( + agents=[researcher], + tasks=[research_task], + process=Process.sequential, # Tasks will be executed one after the other +) + +# Begin the task execution +tech_crew.kickoff() diff --git a/nvidia_models/intro/pyproject.toml b/nvidia_models/intro/pyproject.toml new file mode 100644 index 00000000..8c7747ae --- /dev/null +++ b/nvidia_models/intro/pyproject.toml @@ -0,0 +1,38 @@ +[tool.poetry] +name = "nvidia-intro-crewai-example" +version = "0.1.0" +description = "" +authors = ["raspawar "] + +[tool.poetry.dependencies] +python = ">=3.10.0,<3.12" +python-dotenv = "1.0.0" +litellm = "^1.52.10" +langchain-nvidia-ai-endpoints = "^0.3.5" +crewai = "^0.80.0" + +[tool.pyright] +# https://github.com/microsoft/pyright/blob/main/docs/configuration.md +useLibraryCodeForTypes = true +exclude = [".cache"] + +[tool.ruff.lint] +select = [ + "E", # pycodestyle + "F", # pyflakes + "I", # isort + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "ARG", # flake8-unused-arguments + "SIM", # flake8-simplify + "T201", # print +] +ignore = [ + "W291", # trailing whitespace + "W292", # no newline at end of file + "W293", # blank line contains whitespace +] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" \ No newline at end of file diff --git a/nvidia_models/intro/scripts/check_pydantic.sh b/nvidia_models/intro/scripts/check_pydantic.sh new file mode 100755 index 00000000..06b5bb81 --- /dev/null +++ b/nvidia_models/intro/scripts/check_pydantic.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# This script searches for lines starting with "import pydantic" or "from pydantic" +# in tracked files 
within a Git repository. +# +# Usage: ./scripts/check_pydantic.sh /path/to/repository + +# Check if a path argument is provided +if [ $# -ne 1 ]; then + echo "Usage: $0 /path/to/repository" + exit 1 +fi + +repository_path="$1" + +# Search for lines matching the pattern within the specified repository +result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic') + +# Check if any matching lines were found +if [ -n "$result" ]; then + echo "ERROR: The following lines need to be updated:" + echo "$result" + echo "Please replace the code with an import from langchain_core.pydantic_v1." + echo "For example, replace 'from pydantic import BaseModel'" + echo "with 'from langchain_core.pydantic_v1 import BaseModel'" + exit 1 +fi diff --git a/nvidia_models/intro/scripts/lint_imports.sh b/nvidia_models/intro/scripts/lint_imports.sh new file mode 100755 index 00000000..695613c7 --- /dev/null +++ b/nvidia_models/intro/scripts/lint_imports.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -eu + +# Initialize a variable to keep track of errors +errors=0 + +# make sure not importing from langchain or langchain_experimental +git --no-pager grep '^from langchain\.' . && errors=$((errors+1)) +git --no-pager grep '^from langchain_experimental\.' . 
&& errors=$((errors+1)) + +# Decide on an exit status based on the errors +if [ "$errors" -gt 0 ]; then + exit 1 +else + exit 0 +fi diff --git a/nvidia_models/marketing_strategy/.env.example b/nvidia_models/marketing_strategy/.env.example new file mode 100644 index 00000000..224eba9e --- /dev/null +++ b/nvidia_models/marketing_strategy/.env.example @@ -0,0 +1,3 @@ +SERPER_API_KEY= +NVIDIA_API_KEY= +MODEL=meta/llama-3.1-8b-instruct \ No newline at end of file diff --git a/nvidia_models/marketing_strategy/.gitignore b/nvidia_models/marketing_strategy/.gitignore new file mode 100644 index 00000000..ca67f274 --- /dev/null +++ b/nvidia_models/marketing_strategy/.gitignore @@ -0,0 +1,6 @@ +.env +.DS_Store +__pycache__ +.venv +poetry.lock +.ruff_cache \ No newline at end of file diff --git a/nvidia_models/marketing_strategy/Makefile b/nvidia_models/marketing_strategy/Makefile new file mode 100644 index 00000000..6744bfe9 --- /dev/null +++ b/nvidia_models/marketing_strategy/Makefile @@ -0,0 +1,40 @@ +.PHONY: all format lint test tests integration_tests help + +# Default target executed when no arguments are given to make. +all: help + +install: ## Install the poetry environment and dependencies + poetry install --no-root + +clean: ## Clean up cache directories and build artifacts + find . -type d -name "__pycache__" -exec rm -rf {} + + find . -type d -name "*.pyc" -exec rm -rf {} + + find . -type d -name ".ruff_cache" -exec rm -rf {} + + find . -type d -name ".pytest_cache" -exec rm -rf {} + + find . -type d -name ".coverage" -exec rm -rf {} + + rm -rf dist/ + rm -rf build/ + +###################### +# LINTING AND FORMATTING +###################### + +# Define a variable for Python and notebook files. +PYTHON_FILES=. 
+MYPY_CACHE=.mypy_cache +lint: ## Run code quality tools + poetry run ruff check $(PYTHON_FILES) + poetry run ruff format $(PYTHON_FILES) --check + +format: ## Format code using ruff + poetry run ruff format $(PYTHON_FILES) + poetry run ruff check $(PYTHON_FILES) --fix + +###################### +# HELP +###################### + +help: + @echo '----' + @echo 'format - run code formatters' + @echo 'lint - run linters' diff --git a/nvidia_models/marketing_strategy/README.md b/nvidia_models/marketing_strategy/README.md new file mode 100644 index 00000000..6d2e354d --- /dev/null +++ b/nvidia_models/marketing_strategy/README.md @@ -0,0 +1,51 @@ + +# AI Crew for Marketing Strategy using NVIDIA NIM Endpoint +## Introduction +This project demonstrates the use of the CrewAI framework to automate the creation of a marketing strategy. CrewAI orchestrates autonomous AI agents powered by NVIDIA LLM endpoints, enabling them to collaborate and execute complex tasks efficiently. + +By [@joaomdmoura](https://x.com/joaomdmoura) + +- [NVIDIA NIM](https://docs.api.nvidia.com/?ncid=no-ncid) +- [langchain-nvidia-ai-endpoints](https://github.com/langchain-ai/langchain-nvidia) + +# NVIDIA NIMs + +The `langchain-nvidia-ai-endpoints` package contains LangChain integrations building applications with models on +NVIDIA NIM inference microservice. NIM supports models across domains like chat, embedding, and re-ranking models +from the community as well as NVIDIA. These models are optimized by NVIDIA to deliver the best performance on NVIDIA +accelerated infrastructure and deployed as a NIM, an easy-to-use, prebuilt containers that deploy anywhere using a single +command on NVIDIA accelerated infrastructure. + +NVIDIA hosted deployments of NIMs are available to test on the [NVIDIA API catalog](https://build.nvidia.com/). 
After testing, +NIMs can be exported from NVIDIA’s API catalog using the NVIDIA AI Enterprise license and run on-premises or in the cloud, +giving enterprises ownership and full control of their IP and AI application. + +NIMs are packaged as container images on a per model basis and are distributed as NGC container images through the NVIDIA NGC Catalog. +At their core, NIMs provide easy, consistent, and familiar APIs for running inference on an AI model. + +This example goes over how to use LangChain to interact with NVIDIA-supported models via the `ChatNVIDIA` class. + +For more information on accessing the chat models through this api, check out the [ChatNVIDIA](https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/) documentation. + +## CrewAI Framework +CrewAI is designed to facilitate the collaboration of role-playing AI agents. In this example, these agents work together to create a comprehensive marketing strategy and develop compelling marketing content. + +## Running the Script +It uses meta/llama-3.1-8b-instruct by default so you should have access to that to run it. + +***Disclaimer:** This will use meta/llama-3.1-8b-instruct unless you change it to use a different model, and by doing so it may incur different costs.* + +- **Configure Environment**: Copy `.env.example` and set up the environment variables for [NVIDIA](https://build.nvidia.com/) (`NVIDIA_API_KEY`) and other tools as needed, like [Serper](https://serper.dev). +- **Install Dependencies**: Run `make install`. +- **Customize**: Modify `src/marketing_posts/main.py` to add custom inputs for your agents and tasks. +- **Customize Further**: Check `src/marketing_posts/config/agents.yaml` to update your agents and `src/marketing_posts/config/tasks.yaml` to update your tasks. +- **Execute the Script**: Run `poetry run marketing_posts` and input your project details. + +## Details & Explanation +- **Running the Script**: Execute `poetry run marketing_posts`. 
The script will leverage the CrewAI framework to generate a detailed marketing strategy. +- **Key Components**: + - `src/marketing_posts/main.py`: Main script file. + - `src/marketing_posts/crew.py`: Main crew file where agents and tasks come together, and the main logic is executed. + - `src/marketing_posts/config/agents.yaml`: Configuration file for defining agents. + - `src/marketing_posts/config/tasks.yaml`: Configuration file for defining tasks. + - `src/marketing_posts/tools`: Contains tool classes used by the agents. diff --git a/nvidia_models/marketing_strategy/pyproject.toml b/nvidia_models/marketing_strategy/pyproject.toml new file mode 100644 index 00000000..bbbac835 --- /dev/null +++ b/nvidia_models/marketing_strategy/pyproject.toml @@ -0,0 +1,20 @@ +[tool.poetry] +name = "marketing_posts" +version = "0.1.0" +description = "marketing-posts using crewAI" +authors = ["raspawar raspawar@nvidia.com"] + +[tool.poetry.dependencies] +python = ">=3.10,<=3.13" +langchain-nvidia-ai-endpoints = "^0.3.5" +python-dotenv = "^1.0.1" +crewai = "^0.80.0" +crewai-tools = "^0.14.0" + +[tool.poetry.scripts] +marketing_posts = "marketing_posts.main:run" +train = "marketing_posts.main:train" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/nvidia_models/marketing_strategy/scripts/check_pydantic.sh b/nvidia_models/marketing_strategy/scripts/check_pydantic.sh new file mode 100755 index 00000000..06b5bb81 --- /dev/null +++ b/nvidia_models/marketing_strategy/scripts/check_pydantic.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# This script searches for lines starting with "import pydantic" or "from pydantic" +# in tracked files within a Git repository. 
+# +# Usage: ./scripts/check_pydantic.sh /path/to/repository + +# Check if a path argument is provided +if [ $# -ne 1 ]; then + echo "Usage: $0 /path/to/repository" + exit 1 +fi + +repository_path="$1" + +# Search for lines matching the pattern within the specified repository +result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic') + +# Check if any matching lines were found +if [ -n "$result" ]; then + echo "ERROR: The following lines need to be updated:" + echo "$result" + echo "Please replace the code with an import from langchain_core.pydantic_v1." + echo "For example, replace 'from pydantic import BaseModel'" + echo "with 'from langchain_core.pydantic_v1 import BaseModel'" + exit 1 +fi diff --git a/nvidia_models/marketing_strategy/scripts/lint_imports.sh b/nvidia_models/marketing_strategy/scripts/lint_imports.sh new file mode 100755 index 00000000..695613c7 --- /dev/null +++ b/nvidia_models/marketing_strategy/scripts/lint_imports.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -eu + +# Initialize a variable to keep track of errors +errors=0 + +# make sure not importing from langchain or langchain_experimental +git --no-pager grep '^from langchain\.' . && errors=$((errors+1)) +git --no-pager grep '^from langchain_experimental\.' . 
&& errors=$((errors+1)) + +# Decide on an exit status based on the errors +if [ "$errors" -gt 0 ]; then + exit 1 +else + exit 0 +fi diff --git a/nvidia_models/marketing_strategy/src/marketing_posts/__init__.py b/nvidia_models/marketing_strategy/src/marketing_posts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/nvidia_models/marketing_strategy/src/marketing_posts/config/agents.yaml b/nvidia_models/marketing_strategy/src/marketing_posts/config/agents.yaml new file mode 100644 index 00000000..014b61a4 --- /dev/null +++ b/nvidia_models/marketing_strategy/src/marketing_posts/config/agents.yaml @@ -0,0 +1,43 @@ +lead_market_analyst: + role: > + Lead Market Analyst + goal: > + Conduct amazing analysis of the products and competitors, providing in-depth + insights to guide marketing strategies. + backstory: > + As the Lead Market Analyst at a premier digital marketing firm, you specialize + in dissecting online business landscapes. + +chief_marketing_strategist: + role: > + Chief Marketing Strategist + goal: > + Synthesize amazing insights from product analysis to formulate incredible + marketing strategies. + backstory: > + You are the Chief Marketing Strategist at a leading digital marketing agency, + known for crafting bespoke strategies that drive success. + +creative_content_creator: + role: > + Creative Content Creator + goal: > + Develop compelling and innovative content for social media campaigns, with a + focus on creating high-impact ad copies. + backstory: > + As a Creative Content Creator at a top-tier digital marketing agency, you + excel in crafting narratives that resonate with audiences. Your expertise + lies in turning marketing strategies into engaging stories and visual + content that capture attention and inspire action. 
+ +chief_creative_director: + role: > + Chief Creative Director + goal: > + Oversee the work done by your team to make sure it is the best possible and + aligned with the product goals, review, approve, ask clarifying questions or + delegate follow-up work if necessary. + backstory: > + You are the Chief Content Officer at a leading digital marketing agency + specializing in product branding. You ensure your team crafts the best + possible content for the customer. diff --git a/nvidia_models/marketing_strategy/src/marketing_posts/config/tasks.yaml b/nvidia_models/marketing_strategy/src/marketing_posts/config/tasks.yaml new file mode 100644 index 00000000..ce0616cb --- /dev/null +++ b/nvidia_models/marketing_strategy/src/marketing_posts/config/tasks.yaml @@ -0,0 +1,42 @@ +research_task: + description: > + Conduct a thorough research about the customer and competitors in the context + of {customer_domain}. + Make sure you find any interesting and relevant information given the + current year is 2024. + We are working with them on the following project: {project_description}. + expected_output: > + A complete report on the customer and their customers and competitors, + including their demographics, preferences, market positioning and audience engagement. + +project_understanding_task: + description: > + Understand the project details and the target audience for + {project_description}. + Review any provided materials and gather additional information as needed. + expected_output: > + A detailed summary of the project and a profile of the target audience. + +marketing_strategy_task: + description: > + Formulate a comprehensive marketing strategy for the project + {project_description} of the customer {customer_domain}. + Use the insights from the research task and the project understanding + task to create a high-quality strategy. 
+ expected_output: > + A detailed marketing strategy document that outlines the goals, target + audience, key messages, and proposed tactics, make sure to have name, tatics, channels and KPIs + +campaign_idea_task: + description: > + Develop creative marketing campaign ideas for {project_description}. + Ensure the ideas are innovative, engaging, and aligned with the overall marketing strategy. + expected_output: > + A list of 5 campaign ideas, each with a brief description and expected impact. + +copy_creation_task: + description: > + Create marketing copies based on the approved campaign ideas for {project_description}. + Ensure the copies are compelling, clear, and tailored to the target audience. + expected_output: > + Marketing copies for each campaign idea. diff --git a/nvidia_models/marketing_strategy/src/marketing_posts/crew.py b/nvidia_models/marketing_strategy/src/marketing_posts/crew.py new file mode 100644 index 00000000..315d5433 --- /dev/null +++ b/nvidia_models/marketing_strategy/src/marketing_posts/crew.py @@ -0,0 +1,139 @@ +from typing import List +from crewai import Agent, Crew, Process, Task +from crewai.project import CrewBase, agent, crew, task + +# Uncomment the following line to use an example of a custom tool +# from marketing_posts.tools.custom_tool import MyCustomTool + +# Check our tools documentations for more information on how to use them +from crewai_tools import SerperDevTool, ScrapeWebsiteTool +from pydantic import BaseModel, Field +from dotenv import load_dotenv +import os +from marketing_posts.llm import nvllm +from langchain_nvidia_ai_endpoints import ChatNVIDIA + +load_dotenv() + +model = os.getenv("MODEL", "meta/llama-3.1-8b-instruct") +llm = ChatNVIDIA(model=model) +default_llm = nvllm(model_str="nvidia_nim/" + model, llm=llm) + +os.environ["NVIDIA_NIM_API_KEY"] = os.getenv("NVIDIA_API_KEY") + + +class MarketStrategy(BaseModel): + """Market strategy model""" + + name: str = Field(..., description="Name of the market strategy") 
+ tatics: List[str] = Field( + ..., description="List of tactics to be used in the market strategy" + ) + channels: List[str] = Field( + ..., description="List of channels to be used in the market strategy" + ) + KPIs: List[str] = Field( + ..., description="List of KPIs to be used in the market strategy" + ) + + +class CampaignIdea(BaseModel): + """Campaign idea model""" + + name: str = Field(..., description="Name of the campaign idea") + description: str = Field(..., description="Description of the campaign idea") + audience: str = Field(..., description="Audience of the campaign idea") + channel: str = Field(..., description="Channel of the campaign idea") + + +class Copy(BaseModel): + """Copy model""" + + title: str = Field(..., description="Title of the copy") + body: str = Field(..., description="Body of the copy") + + +@CrewBase +class MarketingPostsCrew: + """MarketingPosts crew""" + + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + + @agent + def lead_market_analyst(self) -> Agent: + return Agent( + config=self.agents_config["lead_market_analyst"], + tools=[SerperDevTool(), ScrapeWebsiteTool()], + verbose=True, + memory=False, + llm=default_llm, + ) + + @agent + def chief_marketing_strategist(self) -> Agent: + return Agent( + config=self.agents_config["chief_marketing_strategist"], + tools=[SerperDevTool(), ScrapeWebsiteTool()], + verbose=True, + memory=False, + llm=default_llm, + ) + + @agent + def creative_content_creator(self) -> Agent: + return Agent( + config=self.agents_config["creative_content_creator"], + verbose=True, + memory=False, + llm=default_llm, + ) + + @task + def research_task(self) -> Task: + return Task( + config=self.tasks_config["research_task"], agent=self.lead_market_analyst() + ) + + @task + def project_understanding_task(self) -> Task: + return Task( + config=self.tasks_config["project_understanding_task"], + agent=self.chief_marketing_strategist(), + ) + + @task + def marketing_strategy_task(self) -> 
Task: + return Task( + config=self.tasks_config["marketing_strategy_task"], + agent=self.chief_marketing_strategist(), + output_pydantic=MarketStrategy, + ) + + @task + def campaign_idea_task(self) -> Task: + return Task( + config=self.tasks_config["campaign_idea_task"], + agent=self.creative_content_creator(), + output_pydantic=CampaignIdea, + ) + + @task + def copy_creation_task(self) -> Task: + return Task( + config=self.tasks_config["copy_creation_task"], + agent=self.creative_content_creator(), + context=[self.marketing_strategy_task(), self.campaign_idea_task()], + output_pydantic=Copy, + ) + + @crew + def crew(self) -> Crew: + """Creates the MarketingPosts crew""" + return Crew( + agents=self.agents, # Automatically created by the @agent decorator + tasks=self.tasks, # Automatically created by the @task decorator + process=Process.sequential, + # verbose=2, + # process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/ + ) diff --git a/nvidia_models/marketing_strategy/src/marketing_posts/llm.py b/nvidia_models/marketing_strategy/src/marketing_posts/llm.py new file mode 100644 index 00000000..1ae3e370 --- /dev/null +++ b/nvidia_models/marketing_strategy/src/marketing_posts/llm.py @@ -0,0 +1,112 @@ +from typing import Any, Dict, List, Optional, Union + +import litellm +from crewai import LLM +import logging +from crewai.utilities.exceptions.context_window_exceeding_exception import ( + LLMContextLengthExceededException, +) + +from langchain_nvidia_ai_endpoints import ChatNVIDIA + + +class nvllm(LLM): + def __init__( + self, + llm: ChatNVIDIA, + model_str: str, + timeout: Optional[Union[float, int]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + n: Optional[int] = None, + stop: Optional[Union[str, List[str]]] = None, + max_completion_tokens: Optional[int] = None, + max_tokens: Optional[int] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: 
Optional[float] = None, + logit_bias: Optional[Dict[int, float]] = None, + response_format: Optional[Dict[str, Any]] = None, + seed: Optional[int] = None, + logprobs: Optional[bool] = None, + top_logprobs: Optional[int] = None, + base_url: Optional[str] = None, + api_version: Optional[str] = None, + api_key: Optional[str] = None, + callbacks: List[Any] = None, + **kwargs, + ): + self.model = model_str + self.timeout = timeout + self.temperature = temperature + self.top_p = top_p + self.n = n + self.stop = stop + self.max_completion_tokens = max_completion_tokens + self.max_tokens = max_tokens + self.presence_penalty = presence_penalty + self.frequency_penalty = frequency_penalty + self.logit_bias = logit_bias + self.response_format = response_format + self.seed = seed + self.logprobs = logprobs + self.top_logprobs = top_logprobs + self.base_url = base_url + self.api_version = api_version + self.api_key = api_key + self.callbacks = callbacks + self.kwargs = kwargs + self.llm = llm + + if callbacks is None: + self.callbacks = callbacks = [] + + self.set_callbacks(callbacks) + + def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = None) -> str: + if callbacks is None: + callbacks = [] + if callbacks and len(callbacks) > 0: + self.set_callbacks(callbacks) + + try: + params = { + "model": self.llm.model, + "input": messages, + "timeout": self.timeout, + "temperature": self.temperature, + "top_p": self.top_p, + "n": self.n, + "stop": self.stop, + "max_tokens": self.max_tokens or self.max_completion_tokens, + "presence_penalty": self.presence_penalty, + "frequency_penalty": self.frequency_penalty, + "logit_bias": self.logit_bias, + "response_format": self.response_format, + "seed": self.seed, + "logprobs": self.logprobs, + "top_logprobs": self.top_logprobs, + "api_key": self.api_key, + **self.kwargs, + } + + response = self.llm.invoke(**params) + return response.content + except Exception as e: + if not 
LLMContextLengthExceededException(str(e))._is_context_limit_error( + str(e) + ): + logging.error(f"LiteLLM call failed: {str(e)}") + + raise # Re-raise the exception after logging + + def set_callbacks(self, callbacks: List[Any]): + callback_types = [type(callback) for callback in callbacks] + for callback in litellm.success_callback[:]: + if type(callback) in callback_types: + litellm.success_callback.remove(callback) + + for callback in litellm._async_success_callback[:]: + if type(callback) in callback_types: + litellm._async_success_callback.remove(callback) + + litellm.callbacks = callbacks diff --git a/nvidia_models/marketing_strategy/src/marketing_posts/main.py b/nvidia_models/marketing_strategy/src/marketing_posts/main.py new file mode 100644 index 00000000..aa108dd7 --- /dev/null +++ b/nvidia_models/marketing_strategy/src/marketing_posts/main.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +import sys +from marketing_posts.crew import MarketingPostsCrew + + +def run(): + # Replace with your inputs, it will automatically interpolate any tasks and agents information + inputs = { + "customer_domain": "nvidia.com/en-in/ai/", + "project_description": """ +nvidia, a leading provider of NIMs, aims to revolutionize marketing automation for its enterprise clients. This project involves developing an innovative marketing strategy to showcase nvidia's NIMs, emphasizing ease of use, scalability, and integration capabilities. The campaign will target tech-savvy decision-makers in medium to large enterprises, highlighting success stories and the transformative potential of nvidia's platform. + +Customer Domain: AI and Automation Solutions +Project Overview: Creating a comprehensive marketing campaign to boost awareness and adoption of nvidia's services among enterprise clients. +""", + } + MarketingPostsCrew().crew().kickoff(inputs=inputs) + + +def train(): + """ + Train the crew for a given number of iterations. 
+ """ + inputs = { + "customer_domain": "nvidia.com", + "project_description": """ +nvidia, a leading provider of gpus, aims to revolutionize marketing automation for its enterprise clients. This project involves developing an innovative marketing strategy to showcase nvidia's advanced gpu, emphasizing ease of use, scalability, and integration capabilities. The campaign will target tech-savvy decision-makers in medium to large enterprises, highlighting success stories and the transformative potential of nvidia's platform. + +Customer Domain: AI and Automation Solutions +Project Overview: Creating a comprehensive marketing campaign to boost awareness and adoption of nvidia's services among enterprise clients. +""", + } + try: + MarketingPostsCrew().crew().train(n_iterations=int(sys.argv[1]), inputs=inputs) + + except Exception as e: + raise Exception(f"An error occurred while training the crew: {e}")