
Commit

fix naming convention
LVH-Tony committed Feb 22, 2024
1 parent ff7f250 commit d1786c8
Showing 48 changed files with 173 additions and 163 deletions.
File renamed without changes.
8 changes: 4 additions & 4 deletions prompting/agent.py → einstein/agent.py
@@ -2,10 +2,10 @@
import time
import bittensor as bt
from dataclasses import asdict
-from prompting.tasks import Task
-from prompting.llm import HuggingFaceLLM
-from prompting.cleaners.cleaner import CleanerPipeline
-from prompting.persona import Persona, create_persona
+from einstein.tasks import Task
+from einstein.llm import HuggingFaceLLM
+from einstein.cleaners.cleaner import CleanerPipeline
+from einstein.persona import Persona, create_persona
from transformers import Pipeline

import warnings
File renamed without changes.
4 changes: 2 additions & 2 deletions prompting/base/miner.py → einstein/base/miner.py
@@ -4,8 +4,8 @@
import asyncio
import threading
import bittensor as bt
-from prompting.base.neuron import BaseNeuron
-from prompting.utils.config import add_miner_args
+from einstein.base.neuron import BaseNeuron
+from einstein.utils.config import add_miner_args
from traceback import print_exception


8 changes: 4 additions & 4 deletions prompting/base/neuron.py → einstein/base/neuron.py
@@ -6,11 +6,11 @@
from abc import ABC, abstractmethod

# Sync calls set weights and also resyncs the metagraph.
-from prompting.utils.config import check_config, add_args, config
-from prompting.utils.misc import ttl_get_block
-from prompting import __spec_version__ as spec_version
+from einstein.utils.config import check_config, add_args, config
+from einstein.utils.misc import ttl_get_block
+from einstein import __spec_version__ as spec_version

-from prompting.mock import MockSubtensor, MockMetagraph
+from einstein.mock import MockSubtensor, MockMetagraph


class BaseNeuron(ABC):
6 changes: 3 additions & 3 deletions prompting/base/validator.py → einstein/base/validator.py
@@ -9,9 +9,9 @@
from typing import List
from traceback import print_exception

-from prompting.base.neuron import BaseNeuron
-from prompting.mock import MockDendrite
-from prompting.utils.config import add_validator_args
+from einstein.base.neuron import BaseNeuron
+from einstein.mock import MockDendrite
+from einstein.utils.config import add_validator_args


class BaseValidatorNeuron(BaseNeuron):
File renamed without changes.
File renamed without changes.
@@ -2,7 +2,7 @@

import bittensor as bt

-from prompting.cleaners.all_cleaners import RemoveQuotes, RemoveRoles, PruneEnding
+from einstein.cleaners.all_cleaners import RemoveQuotes, RemoveRoles, PruneEnding

SUPPORTED_CLEANERS = {
"remove_quotes": RemoveQuotes,
4 changes: 2 additions & 2 deletions prompting/conversation.py → einstein/conversation.py
@@ -1,8 +1,8 @@
-from prompting.tasks import (
+from einstein.tasks import (
Task,
MathTask,
)
-from prompting.tools import (
+from einstein.tools import (
MathDataset,
)

File renamed without changes.
22 changes: 12 additions & 10 deletions prompting/forward.py → einstein/forward.py
@@ -4,13 +4,13 @@
import bittensor as bt

from typing import List
-from prompting.agent import HumanAgent
-from prompting.dendrite import DendriteResponseEvent
-from prompting.conversation import create_task
-from prompting.protocol import PromptingSynapse
-from prompting.rewards import RewardResult
-from prompting.utils.uids import get_random_uids
-from prompting.utils.logging import log_event
+from einstein.agent import HumanAgent
+from einstein.dendrite import DendriteResponseEvent
+from einstein.conversation import create_task
+from einstein.protocol import CoreSynapse
+from einstein.rewards import RewardResult
+from einstein.utils.uids import get_random_uids
+from einstein.utils.logging import log_event


async def run_step(
@@ -39,9 +39,9 @@ async def run_step(

axons = [self.metagraph.axons[uid] for uid in uids]
# Make calls to the network with the prompt.
-responses: List[PromptingSynapse] = await self.dendrite(
+responses: List[CoreSynapse] = await self.dendrite(
axons=axons,
-synapse=PromptingSynapse(roles=["user"], messages=[agent.challenge]),
+synapse=CoreSynapse(roles=["user"], messages=[agent.challenge]),
timeout=timeout,
)

@@ -88,7 +88,9 @@ async def forward(self):
bt.logging.info(
f"📋 Selecting task... from {self.config.neuron.tasks} with distribution {self.config.neuron.task_p}"
)
-bt.logging.info(f"Tasks: {self.config.neuron.tasks}, Probabilities: {self.config.neuron.task_p}")
+bt.logging.info(
+    f"Tasks: {self.config.neuron.tasks}, Probabilities: {self.config.neuron.task_p}"
+)

# Create a specific task
# task_name = np.random.choice(
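For reference, a minimal sketch of the renamed query path this hunk establishes in einstein/forward.py. The helper name `query_miners`, its standalone form, and the default timeout are illustrative assumptions; `dendrite`, `axons`, and `challenge` stand in for the validator's runtime objects shown in the diff (`self.dendrite`, the metagraph axons, and `agent.challenge`).

```python
# Illustrative sketch only: mirrors the renamed CoreSynapse usage from the hunk above.
# The helper name, signature, and default timeout are assumptions, not repository code.
from typing import List

from einstein.protocol import CoreSynapse


async def query_miners(dendrite, axons, challenge: str, timeout: float = 15.0) -> List[CoreSynapse]:
    # Send the challenge to the selected miners as a CoreSynapse (formerly PromptingSynapse).
    responses: List[CoreSynapse] = await dendrite(
        axons=axons,
        synapse=CoreSynapse(roles=["user"], messages=[challenge]),
        timeout=timeout,
    )
    return responses
```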
4 changes: 2 additions & 2 deletions prompting/llm.py → einstein/llm.py
@@ -4,9 +4,9 @@
import bittensor as bt

from transformers import Pipeline, pipeline
-from prompting.mock import MockPipeline
+from einstein.mock import MockPipeline

-from prompting.cleaners.cleaner import CleanerPipeline
+from einstein.cleaners.cleaner import CleanerPipeline


def load_pipeline(
File renamed without changes.
File renamed without changes.
52 changes: 26 additions & 26 deletions prompting/protocol.py → einstein/protocol.py
@@ -5,11 +5,11 @@
from starlette.responses import StreamingResponse


-class PromptingSynapse(bt.Synapse):
+class CoreSynapse(bt.Synapse):
"""
-The PromptingSynapse subclass of the Synapse class encapsulates the functionalities related to prompting scenarios.
+The CoreSynapse subclass of the Synapse class encapsulates the functionalities related to prompting scenarios.
-It specifies three fields - `roles`, `messages` and `completion` - that define the state of the PromptingSynapse object.
+It specifies three fields - `roles`, `messages` and `completion` - that define the state of the CoreSynapse object.
The `roles` and `messages` are read-only fields defined during object initialization, and `completion` is a mutable
field that can be updated as the prompting scenario progresses.
@@ -23,18 +23,18 @@ class PromptingSynapse(bt.Synapse):
required_hash_fields List[str]: A list of fields that are required for the hash.
Methods:
-deserialize() -> "PromptingSynapse": Returns the instance of the current object.
+deserialize() -> "CoreSynapse": Returns the instance of the current object.
-The `PromptingSynapse` class also overrides the `deserialize` method, returning the
+The `CoreSynapse` class also overrides the `deserialize` method, returning the
instance itself when this method is invoked. Additionally, it provides a `Config`
inner class that enforces the validation of assignments (`validate_assignment = True`).
-Here is an example of how the `PromptingSynapse` class can be used:
+Here is an example of how the `CoreSynapse` class can be used:
```python
-# Create a PromptingSynapse instance
-prompt = PromptingSynapse(roles=["system", "user"], messages=["Hello", "Hi"])
+# Create a CoreSynapse instance
+prompt = CoreSynapse(roles=["system", "user"], messages=["Hello", "Hi"])
# Print the roles and messages
print("Roles:", prompt.roles)
@@ -56,49 +56,49 @@ class PromptingSynapse(bt.Synapse):
Completion: "The meaning of life is 42. Deal with it, human."
```
-This example demonstrates how to create an instance of the `PromptingSynapse` class, access the
+This example demonstrates how to create an instance of the `CoreSynapse` class, access the
`roles` and `messages` fields, and update the `completion` field.
"""

class Config:
"""
-Pydantic model configuration class for PromptingSynapse. This class sets validation of attribute assignment as True.
+Pydantic model configuration class for CoreSynapse. This class sets validation of attribute assignment as True.
validate_assignment set to True means the pydantic model will validate attribute assignments on the class.
"""

validate_assignment = True

-def deserialize(self) -> "PromptingSynapse":
+def deserialize(self) -> "CoreSynapse":
"""
-Returns the instance of the current PromptingSynapse object.
+Returns the instance of the current CoreSynapse object.
This method is intended to be potentially overridden by subclasses for custom deserialization logic.
-In the context of the PromptingSynapse class, it simply returns the instance itself. However, for subclasses
+In the context of the CoreSynapse class, it simply returns the instance itself. However, for subclasses
inheriting from this class, it might give a custom implementation for deserialization if need be.
Returns:
-PromptingSynapse: The current instance of the PromptingSynapse class.
+CoreSynapse: The current instance of the CoreSynapse class.
"""
return self

roles: List[str] = pydantic.Field(
...,
title="Roles",
-description="A list of roles in the PromptingSynapse scenario. Immuatable.",
+description="A list of roles in the CoreSynapse scenario. Immuatable.",
allow_mutation=False,
)

messages: List[str] = pydantic.Field(
...,
title="Messages",
-description="A list of messages in the PromptingSynapse scenario. Immutable.",
+description="A list of messages in the CoreSynapse scenario. Immutable.",
allow_mutation=False,
)

completion: str = pydantic.Field(
"",
title="Completion",
-description="Completion status of the current PromptingSynapse object. This attribute is mutable and can be updated.",
+description="Completion status of the current CoreSynapse object. This attribute is mutable and can be updated.",
)

required_hash_fields: List[str] = pydantic.Field(
@@ -109,13 +109,13 @@ def deserialize(self) -> "PromptingSynapse":
)


-class StreamPromptingSynapse(bt.StreamingSynapse):
+class StreamCoreSynapse(bt.StreamingSynapse):
"""
-StreamPromptingSynapse is a specialized implementation of the `StreamingSynapse` tailored for prompting functionalities within
+StreamCoreSynapse is a specialized implementation of the `StreamingSynapse` tailored for prompting functionalities within
the Bittensor network. This class is intended to interact with a streaming response that contains a sequence of tokens,
which represent prompts or messages in a certain scenario.
-As a developer, when using or extending the `StreamPromptingSynapse` class, you should be primarily focused on the structure
+As a developer, when using or extending the `StreamCoreSynapse` class, you should be primarily focused on the structure
and behavior of the prompts you are working with. The class has been designed to seamlessly handle the streaming,
decoding, and accumulation of tokens that represent these prompts.
Expand All @@ -141,21 +141,21 @@ class StreamPromptingSynapse(bt.StreamingSynapse):
- `extract_response_json`: Extracts relevant JSON data from the response, useful for gaining insights on the response's
metadata or for debugging purposes.
-Note: While you can directly use the `StreamPromptingSynapse` class, it's designed to be extensible. Thus, you can create
+Note: While you can directly use the `StreamCoreSynapse` class, it's designed to be extensible. Thus, you can create
subclasses to further customize behavior for specific prompting scenarios or requirements.
"""

roles: List[str] = pydantic.Field(
...,
title="Roles",
-description="A list of roles in the PromptingSynapse scenario. Immuatable.",
+description="A list of roles in the CoreSynapse scenario. Immuatable.",
allow_mutation=False,
)

messages: List[str] = pydantic.Field(
...,
title="Messages",
-description="A list of messages in the PromptingSynapse scenario. Immutable.",
+description="A list of messages in the CoreSynapse scenario. Immutable.",
allow_mutation=False,
)

@@ -169,13 +169,13 @@ class StreamPromptingSynapse(bt.StreamingSynapse):
completion: str = pydantic.Field(
"",
title="Completion",
-description="Completion status of the current PromptingSynapse object. This attribute is mutable and can be updated.",
+description="Completion status of the current CoreSynapse object. This attribute is mutable and can be updated.",
)

async def process_streaming_response(self, response: StreamingResponse):
"""
`process_streaming_response` is an asynchronous method designed to process the incoming streaming response from the
-Bittensor network. It's the heart of the StreamPromptingSynapse class, ensuring that streaming tokens, which represent
+Bittensor network. It's the heart of the StreamCoreSynapse class, ensuring that streaming tokens, which represent
prompts or messages, are decoded and appropriately managed.
As the streaming response is consumed, the tokens are decoded from their 'utf-8' encoded format, split based on
@@ -223,7 +223,7 @@ def extract_response_json(self, response: StreamingResponse) -> dict:
dict: A structured dictionary containing:
- Basic response metadata such as name, timeout, total_size, and header_size.
- Dendrite and Axon related information extracted from headers.
-- Roles and Messages pertaining to the current StreamPromptingSynapse instance.
+- Roles and Messages pertaining to the current StreamCoreSynapse instance.
- The accumulated completion.
"""
headers = {
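In practical terms, the protocol rename means downstream code swaps both the package path and the class name. A minimal before/after sketch, based on the import change shown in einstein/forward.py and the usage example from the docstring above:

```python
# Before this commit:
#   from prompting.protocol import PromptingSynapse
#   prompt = PromptingSynapse(roles=["system", "user"], messages=["Hello", "Hi"])

# After this commit:
from einstein.protocol import CoreSynapse

prompt = CoreSynapse(roles=["system", "user"], messages=["Hello", "Hi"])
print("Roles:", prompt.roles)
print("Messages:", prompt.messages)

# `completion` is the one mutable field and can be updated as the scenario progresses.
prompt.completion = "The meaning of life is 42. Deal with it, human."
print("Completion:", prompt.completion)
```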
File renamed without changes.
@@ -2,7 +2,7 @@
import torch
from typing import List
from sympy import simplify, symbols, parse_expr
-from prompting.rewards import BaseRewardModel, BatchRewardOutput
+from einstein.rewards import BaseRewardModel, BatchRewardOutput

class AdvancedMathModel(BaseRewardModel):
@property
@@ -2,7 +2,7 @@
import torch
from typing import List
from sympy.parsing.sympy_parser import parse_expr
-from prompting.rewards import BaseRewardModel, BatchRewardOutput, RewardModelTypeEnum
+from einstein.rewards import BaseRewardModel, BatchRewardOutput, RewardModelTypeEnum


class FloatDiffModel(BaseRewardModel):
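The body of `FloatDiffModel` is collapsed in this view; purely as an illustration of the kind of numeric comparison the `parse_expr` import above enables (not the repository's actual reward logic), a sketch might look like this. The function name, tolerance, and linear decay formula are assumptions for the example.

```python
# Illustrative only: a minimal numeric-difference score built on sympy's parse_expr,
# in the spirit of the FloatDiffModel import shown above. The real reward model's
# interface (BaseRewardModel / BatchRewardOutput) is not reproduced here.
from sympy.parsing.sympy_parser import parse_expr


def float_diff_score(reference: str, completion: str, tolerance: float = 1e-6) -> float:
    """Return 1.0 when the completion evaluates to (about) the reference value, decaying toward 0 otherwise."""
    try:
        ref = float(parse_expr(reference).evalf())
        pred = float(parse_expr(completion).evalf())
    except Exception:
        return 0.0  # unparseable answers earn no reward
    denom = max(abs(ref), tolerance)
    relative_error = abs(ref - pred) / denom
    return max(0.0, 1.0 - relative_error)


# Example: an exact match scores 1.0, a near miss scores slightly less.
print(float_diff_score("3*4 + 0.5", "12.5"))   # 1.0
print(float_diff_score("3*4 + 0.5", "12.4"))   # ~0.992
```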
@@ -1,9 +1,9 @@
from typing import List

-from prompting.tasks import MathTask
-from prompting.rewards import BaseRewardModel
-from prompting.rewards.float_diff import FloatDiffModel
-from prompting.rewards.advanced_math import AdvancedMathModel
+from einstein.tasks import MathTask
+from einstein.rewards import BaseRewardModel
+from einstein.rewards.float_diff import FloatDiffModel
+from einstein.rewards.advanced_math import AdvancedMathModel

SUPPORTED_TASKS = {
"math": MathTask
File renamed without changes.
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion prompting/tasks/math.py → einstein/tasks/math.py
@@ -1,7 +1,7 @@
import sys
import bittensor as bt
from dataclasses import dataclass
-from prompting.tasks import Task
+from einstein.tasks import Task


@dataclass
4 changes: 2 additions & 2 deletions prompting/tasks/task.py → einstein/tasks/task.py
@@ -4,9 +4,9 @@
from dataclasses import dataclass
from enum import Enum
from typing import List, Union, Dict
-from prompting.llm import HuggingFaceLLM
+from einstein.llm import HuggingFaceLLM
from transformers import Pipeline
-from prompting.cleaners.cleaner import CleanerPipeline
+from einstein.cleaners.cleaner import CleanerPipeline


class TaskEvaluationType(Enum):
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
6 changes: 3 additions & 3 deletions prompting/utils/logging.py → einstein/utils/logging.py
@@ -7,7 +7,7 @@
from datetime import datetime
from typing import List
from loguru import logger
-import prompting
+import einstein

@dataclass
class Log:
@@ -61,8 +61,8 @@ def init_wandb(self, reinit=False):
"""Starts a new wandb run."""
tags = [
self.wallet.hotkey.ss58_address,
-prompting.__version__,
-str(prompting.__spec_version__),
+einstein.__version__,
+str(einstein.__spec_version__),
f"netuid_{self.metagraph.netuid}",
]

File renamed without changes.
File renamed without changes.