From 612c9eb93da1a01d7b9e47e1e9eb023a4886f7ad Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Mon, 29 Jan 2024 17:32:51 -0500
Subject: [PATCH 01/34] initial changes to hf miner

---
 neurons/miners/{zephyr => huggingface}/README.md   | 0
 neurons/miners/{zephyr => huggingface}/__init__.py | 0
 neurons/miners/{zephyr => huggingface}/miner.py    | 6 +++---
 3 files changed, 3 insertions(+), 3 deletions(-)
 rename neurons/miners/{zephyr => huggingface}/README.md (100%)
 rename neurons/miners/{zephyr => huggingface}/__init__.py (100%)
 rename neurons/miners/{zephyr => huggingface}/miner.py (98%)

diff --git a/neurons/miners/zephyr/README.md b/neurons/miners/huggingface/README.md
similarity index 100%
rename from neurons/miners/zephyr/README.md
rename to neurons/miners/huggingface/README.md
diff --git a/neurons/miners/zephyr/__init__.py b/neurons/miners/huggingface/__init__.py
similarity index 100%
rename from neurons/miners/zephyr/__init__.py
rename to neurons/miners/huggingface/__init__.py
diff --git a/neurons/miners/zephyr/miner.py b/neurons/miners/huggingface/miner.py
similarity index 98%
rename from neurons/miners/zephyr/miner.py
rename to neurons/miners/huggingface/miner.py
index 84d497e6..1e2fd1d8 100644
--- a/neurons/miners/zephyr/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -30,7 +30,7 @@
 from neurons.miner import Miner
 
 
-class ZephyrMiner(Miner):
+class HuggingFaceMiner(Miner):
     """
     Base miner which runs zephyr (https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)    
     This requires a GPU with at least 20GB of memory.
@@ -57,7 +57,7 @@ def __init__(self, config=None):
             )
 
         if self.config.wandb.on:
-            self.identity_tags = ("zephyr_miner", )
+            self.identity_tags = ("hf_miner", )
 
             if self.config.neuron.load_quantized:
                 self.identity_tags += ("8bits_quantization", )
@@ -134,7 +134,7 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
-    with ZephyrMiner() as miner:
+    with HuggingFaceMiner() as miner:
         while True:
             bt.logging.info("Miner running...", time.time())
             time.sleep(5)

From afe39935619a105d497496e2a3fd6b227f3613b7 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Tue, 30 Jan 2024 18:20:11 +0000
Subject: [PATCH 02/34] adds 8bit and 4bit config

---
 prompting/utils/config.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/prompting/utils/config.py b/prompting/utils/config.py
index a6001206..f6fe2f20 100644
--- a/prompting/utils/config.py
+++ b/prompting/utils/config.py
@@ -150,10 +150,17 @@ def add_miner_args(cls, parser):
     )
 
     parser.add_argument(
-        "--neuron.load_quantized",
+        "--neuron.load_in_8bit",
         type=str,
         default=False,
-        help="Load quantized model.",
+        help="Load quantized model in 8 bits. Note that this parameter only applies to hugging face miners.",
+    )
+
+    parser.add_argument(
+        "--neuron.load_in_4bit",
+        type=str,
+        default=False,
+        help="Load quantized model in 4 bits. Note that this parameter only applies to hugging face miners.",
     )
 
     parser.add_argument(

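[Editor's note] A caveat worth flagging on the two flags added above: they are declared with `type=str` but a `bool` default, so any value supplied on the command line arrives as a non-empty string and is therefore truthy. A minimal repro of this argparse behaviour (illustrative only, not part of the patch):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--neuron.load_in_8bit", type=str, default=False)

# Omitting the flag keeps the bool default, so quantization stays off.
args = parser.parse_args([])
assert not getattr(args, "neuron.load_in_8bit")

# Any supplied value is a non-empty string -- even the literal "False" is truthy.
args = parser.parse_args(["--neuron.load_in_8bit", "False"])
assert getattr(args, "neuron.load_in_8bit")
```

In practice `--neuron.load_in_8bit True` behaves as documented, but `--neuron.load_in_8bit False` would also enable quantization.
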
From a424e6e27922f35d89377631bbbceda3873f7f01 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Tue, 30 Jan 2024 18:20:34 +0000
Subject: [PATCH 03/34] simplifies torch type logic

---
 neurons/validator.py |  1 -
 prompting/llm.py     | 29 ++++++++++++-----------------
 2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/neurons/validator.py b/neurons/validator.py
index 182a3d43..2db87e7f 100644
--- a/neurons/validator.py
+++ b/neurons/validator.py
@@ -39,7 +39,6 @@ def __init__(self, config=None):
 
         self.llm_pipeline = load_pipeline(
             model_id=self.config.neuron.model_id,
-            torch_dtype=torch.bfloat16,
             device=self.device,
             mock=self.config.mock,
         )
diff --git a/prompting/llm.py b/prompting/llm.py
index 8c4f7896..5b5a560e 100644
--- a/prompting/llm.py
+++ b/prompting/llm.py
@@ -16,7 +16,7 @@
 # DEALINGS IN THE SOFTWARE.
 
 import time
-
+import torch
 from typing import List, Dict
 import bittensor as bt
 
@@ -26,7 +26,7 @@
 from prompting.cleaners.cleaner import CleanerPipeline
 
 
-def load_pipeline(model_id, device=None, torch_dtype=None, mock=False, model_kwargs:dict = None):
+def load_pipeline(model_id, device=None, mock=False, model_kwargs:dict = None):
     """Loads the HuggingFace pipeline for the LLM, or a mock pipeline if mock=True"""
 
     if mock or model_id == "mock":
@@ -34,22 +34,17 @@ def load_pipeline(model_id, device=None, torch_dtype=None, mock=False, model_kwa
 
     if not device.startswith("cuda"):
         bt.logging.warning("Only crazy people run this on CPU. It is not recommended.")
-
-    # model_kwargs torch type definition conflicts with pipeline torch_dtype, so we need to differentiate them
+    
+    # Sets the default model torch dtype in case it is not defined
     if model_kwargs is None:
-        llm_pipeline = pipeline(
-            "text-generation",
-            model=model_id,
-            device=device,
-            torch_dtype=torch_dtype,            
-        )
-    else:
-        llm_pipeline = pipeline(
-            "text-generation",
-            model=model_id,
-            device_map=device,
-            model_kwargs=model_kwargs
-        )
+        model_kwargs = dict(torch_dtype=torch.bfloat16)
+    
+    llm_pipeline = pipeline(
+        "text-generation",
+        model=model_id,
+        device_map=device,
+        model_kwargs=model_kwargs
+    )
 
     return llm_pipeline
 

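[Editor's note] For reference, a minimal usage sketch of the simplified `load_pipeline` signature above (assuming a CUDA device and, for the quantized path, the `bitsandbytes` dependency):

```python
import torch
from prompting.llm import load_pipeline

# Default path: model_kwargs is None, so the pipeline falls back to bfloat16.
pipe = load_pipeline(model_id="HuggingFaceH4/zephyr-7b-beta", device="cuda:0")

# Explicit path: model_kwargs are forwarded verbatim to transformers.pipeline(),
# e.g. to request 8-bit loading instead.
pipe_8bit = load_pipeline(
    model_id="HuggingFaceH4/zephyr-7b-beta",
    device="cuda:0",
    model_kwargs=dict(torch_dtype=torch.float16, load_in_8bit=True),
)
```
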
From 843f9a26426ca0235912a4d2cbca9c187788dec0 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Tue, 30 Jan 2024 18:21:30 +0000
Subject: [PATCH 04/34] adapt zephyr miner to hf miner

---
 neurons/miners/huggingface/miner.py | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/neurons/miners/huggingface/miner.py b/neurons/miners/huggingface/miner.py
index 1e2fd1d8..6377fb45 100644
--- a/neurons/miners/huggingface/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -32,11 +32,10 @@
 
 class HuggingFaceMiner(Miner):
     """
-    Base miner which runs zephyr (https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)    
-    This requires a GPU with at least 20GB of memory.
+    Base 🤗 Hugging Face miner, integrated with the Hugging Face pipeline.
     To run this miner from the project root directory:
 
-    python neurons/miners/zephyr/miner.py --wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> --subtensor.network <network> --netuid <netuid> --axon.port <port> --axon.external_port <port> --logging.debug True --neuron.model_id HuggingFaceH4/zephyr-7b-beta --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
+    python neurons/miners/huggingface/miner.py --wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> --neuron.model_id <model_id> --subtensor.network <network> --netuid <netuid> --axon.port <port> --axon.external_port <port> --logging.debug True --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
     """
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
@@ -49,27 +48,36 @@ def __init__(self, config=None):
         super().__init__(config=config)
 
         model_kwargs = None
-        if self.config.neuron.load_quantized:
-            bt.logging.info("Loading quantized model...")
+        if self.config.neuron.load_in_8bit:
+            bt.logging.info("Loading 8 bit quantized model...")
             model_kwargs = dict(
                 torch_dtype=torch.float16,
                 load_in_8bit=True,
             )
 
+        if self.config.neuron.load_in_4bit:
+            bt.logging.info("Loading 4 bit quantized model...")
+            model_kwargs = dict(
+                torch_dtype=torch.float32,
+                load_in_4bit=True,
+            )
+
         if self.config.wandb.on:
             self.identity_tags = ("hf_miner", )
 
-            if self.config.neuron.load_quantized:
-                self.identity_tags += ("8bits_quantization", )
+            if self.config.neuron.load_in_8bit:
+                self.identity_tags += ("8bit_quantization", )            
+            elif self.config.neuron.load_in_4bit:
+                self.identity_tags += ("4bit_quantization", )
 
         self.llm_pipeline = load_pipeline(
-            model_id=self.config.neuron.model_id,
-            torch_dtype=torch.float16,
+            model_id=self.config.neuron.model_id,            
             device=self.device,
             mock=self.config.mock,
             model_kwargs=model_kwargs,
         )        
 
+        self.model_id = self.config.neuron.model_id
         self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
 
     async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
@@ -92,7 +100,7 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
             bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
 
             prompt = synapse.messages[-1]
-            bt.logging.debug(f"💬 Querying zephyr: {prompt}")
+            bt.logging.debug(f"💬 Querying {self.model_id}: {prompt}")
 
             response = HuggingFaceLLM(
                 llm_pipeline=self.llm_pipeline,

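[Editor's note] Note the precedence in the constructor above: the 4-bit branch is a second, independent `if`, so when both flags are set the 4-bit kwargs overwrite the 8-bit ones. A hypothetical helper expressing the same selection explicitly (sketch only, not part of the patch):

```python
import torch
from typing import Optional

def quantization_kwargs(load_in_8bit: bool, load_in_4bit: bool) -> Optional[dict]:
    """Mirrors the miner's branch logic: 4-bit wins when both flags are set."""
    if load_in_4bit:
        return dict(torch_dtype=torch.float32, load_in_4bit=True)
    if load_in_8bit:
        return dict(torch_dtype=torch.float16, load_in_8bit=True)
    return None  # no quantization requested; load_pipeline applies its default
```
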
From e51ff2651eee83a8d6837f892747e5a3284c5f31 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Tue, 30 Jan 2024 20:06:34 +0000
Subject: [PATCH 05/34] update README for hf miner

---
 neurons/miners/huggingface/README.md | 34 +++++++++++++++++-----------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/neurons/miners/huggingface/README.md b/neurons/miners/huggingface/README.md
index 2310e4a7..2bf4602a 100644
--- a/neurons/miners/huggingface/README.md
+++ b/neurons/miners/huggingface/README.md
@@ -1,5 +1,5 @@
-# Zephyr Bittensor Miner
-This repository contains a Bittensor Miner that uses [HuggingFaceH4/zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta). The miner connects to the Bittensor network, registers its wallet, and serves the zephyr model to the network.
+# Hugging Face Bittensor Miner
+This repository contains a Bittensor Miner integrated with 🤗 Hugging Face pipelines. The miner connects to the Bittensor network, registers its wallet, and serves a Hugging Face model to the network.
 
 ## Prerequisites
 
@@ -12,28 +12,36 @@ This repository contains a Bittensor Miner that uses [HuggingFaceH4/zephyr-7b-be
 git clone https://github.com/opentensor/prompting.git
 ```
 2. Install the required packages for the [repository requirements](../../../requirements.txt) with `pip install -r requirements.txt`
-3. Install the required packages for the [wikipedia agent miner](requirements.txt) with `pip install -r requirements.txt`
-```
+
 
 For more configuration options related to the wallet, axon, subtensor, logging, and metagraph, please refer to the Bittensor documentation.
 
 ## Example Usage
 
-To run the Zephyr Bittensor Miner with default settings, use the following command:
+Here are some example models that can be leveraged by the Hugging Face miner, alongside the suggested GPU footprint to run each model comfortably:
+| model_id | Default GPU footprint | 8-bit quantization GPU footprint | 4-bit quantization GPU footprint |
+| --- | ---- | ---- | ---- |
+| HuggingFaceH4/zephyr-7b-beta | 18 GB | 12 GB | 7 GB |
+| teknium/OpenHermes-2.5-Mistral-7B | 30 GB | 10 GB | 7 GB |
+| upstage/SOLAR-10.7B-Instruct-v1.0 | 42 GB | 14 GB| 8 GB |
+| mistralai/Mixtral-8x7B-Instruct-v0.1 | 92 GB* | 64 GB* | 30 GB* |
+
+> \* Big models such as Mixtral are very costly to run and optimize, so always bear in mind the trade-offs between model speed, model quality, and infrastructure cost.
+
+
+To run the Hugging Face Bittensor Miner with default settings, use the following command:
 ```bash
-python3 neurons/miners/zephyr/miner.py \
+python3 neurons/miners/huggingface/miner.py \
     --wallet.name <<your-wallet-name>> \
     --wallet.hotkey <<your-hotkey>>
-    --neuron.model_id HuggingFaceH4/zephyr-7b-beta
+    --neuron.model_id <<model_id>>
 ```
 
-You will need 18GB of GPU to run this miner in comfortable settings.
-
-You can also run the quantized version of this model that takes ~10GB of GPU RAM by adding the flag `--neuron.load_quantized`:
+You can also run automatic quantization by adding the flag `--neuron.load_in_8bit` for 8-bit quantization or `--neuron.load_in_4bit` for 4-bit quantization:
 ```bash
-python3 neurons/miners/zephyr/miner.py \
+python3 neurons/miners/huggingface/miner.py \
     --wallet.name <<your-wallet-name>> \
     --wallet.hotkey <<your-hotkey>>
-    --neuron.model_id HuggingFaceH4/zephyr-7b-beta
-    --neuron.load_quantized True
+    --neuron.model_id <<model_id>>
+    --neuron.load_in_8bit True
 ```
\ No newline at end of file

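[Editor's note] The footprints in the table above are roughly what a weights-only estimate predicts plus runtime overhead. A crude back-of-envelope heuristic (illustrative only; measured usage also includes activations, KV cache, and framework overhead):

```python
def rough_weight_vram_gb(params_billion: float, bits_per_param: int) -> float:
    """Weights-only VRAM estimate in GB: parameter count x bytes per parameter."""
    return params_billion * bits_per_param / 8

# zephyr-7b: ~14 GB at 16-bit, ~7 GB at 8-bit, ~3.5 GB at 4-bit weights,
# versus the 18 / 12 / 7 GB in the table once overhead is included.
for bits in (16, 8, 4):
    print(bits, round(rough_weight_vram_gb(7, bits), 1), "GB")
```
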
From 65f3b741287cbae8fe995058c96307f2b891fb78 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Tue, 30 Jan 2024 22:30:03 +0000
Subject: [PATCH 06/34] adds should_force_model_loading flag to miners

---
 neurons/miners/huggingface/miner.py |  5 ++++-
 prompting/utils/config.py           | 10 +++++++---
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/neurons/miners/huggingface/miner.py b/neurons/miners/huggingface/miner.py
index 6377fb45..aa6663eb 100644
--- a/neurons/miners/huggingface/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -69,11 +69,14 @@ def __init__(self, config=None):
                 self.identity_tags += ("8bit_quantization", )            
             elif self.config.neuron.load_in_4bit:
                 self.identity_tags += ("4bit_quantization", )
+        
+        # Forces model loading behaviour, overriding the mock flag
+        mock = False if self.config.neuron.should_force_model_loading else self.config.mock
 
         self.llm_pipeline = load_pipeline(
             model_id=self.config.neuron.model_id,            
             device=self.device,
-            mock=self.config.mock,
+            mock=mock,
             model_kwargs=model_kwargs,
         )        
 
diff --git a/prompting/utils/config.py b/prompting/utils/config.py
index f6fe2f20..009b7197 100644
--- a/prompting/utils/config.py
+++ b/prompting/utils/config.py
@@ -22,8 +22,6 @@
 import bittensor as bt
 from loguru import logger
 
-#TODO: enable 4bit and 8bit precision llms via config
-
 def check_config(cls, config: "bt.Config"):
     r"""Checks/validates the config namespace object."""
     bt.logging.check_config(config)
@@ -117,7 +115,6 @@ def add_args(cls, parser):
         "--wandb.off", action="store_true", help="Turn off wandb.", default=False
     )
 
-
     parser.add_argument(
         "--wandb.offline",
         action="store_true",
@@ -219,6 +216,13 @@ def add_miner_args(cls, parser):
         help="Set miner to stop on forward exception.",
     )
 
+    parser.add_argument(
+        "--neuron.should_force_model_loading",
+        type=bool,
+        default=False,
+        help="Force model loading independent of mock flag.",
+    ) 
+
     parser.add_argument(
         "--wandb.on",
         type=bool,

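[Editor's note] The interaction between the new flag and `--mock` reduces to a small truth table; a standalone sketch of the one-line override added above (hypothetical helper, not part of the patch):

```python
def resolve_mock(should_force_model_loading: bool, mock: bool) -> bool:
    """Mirrors `mock = False if ... else self.config.mock` in the miner:
    forcing model loading always wins over the mock flag."""
    return False if should_force_model_loading else mock

assert resolve_mock(False, False) is False  # real model
assert resolve_mock(False, True) is True    # mock pipeline
assert resolve_mock(True, False) is False   # real model
assert resolve_mock(True, True) is False    # forced real model despite --mock
```
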
From 1ae75409d0304a7a154f36271075b244e1f1e1dc Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Wed, 31 Jan 2024 16:51:16 +0000
Subject: [PATCH 07/34] miners module refactor

---
 neurons/miners/__init__.py                    |   0
 neurons/miners/huggingface/__init__.py        |   0
 neurons/miners/huggingface/miner.py           | 126 +-------------
 neurons/miners/openai/__init__.py             |   0
 neurons/miners/openai/miner.py                | 135 +--------------
 neurons/miners/openai/requirements.txt        |   5 -
 neurons/miners/test/echo.py                   |  38 +----
 neurons/miners/test/mock.py                   |  36 +---
 neurons/miners/test/phrase.py                 |  49 +-----
 neurons/miners/wiki_agent/miner.py            | 113 +------------
 neurons/miners/wiki_agent/requirements.txt    |   5 -
 .../base/prompting_miner.py                   |  18 +-
 prompting/miners/__init__.py                  |  10 ++
 prompting/miners/echo.py                      |  52 ++++++
 prompting/miners/hf_miner.py                  | 154 ++++++++++++++++++
 prompting/miners/mock.py                      |  51 ++++++
 prompting/miners/openai_miner.py              | 150 +++++++++++++++++
 prompting/miners/phrase.py                    |  64 ++++++++
 .../miners/wiki_agent.py                      |   7 +-
 prompting/miners/wiki_agent_miner.py          | 129 +++++++++++++++
 requirements.txt                              |   3 +
 21 files changed, 624 insertions(+), 521 deletions(-)
 delete mode 100644 neurons/miners/__init__.py
 delete mode 100644 neurons/miners/huggingface/__init__.py
 delete mode 100644 neurons/miners/openai/__init__.py
 delete mode 100644 neurons/miners/openai/requirements.txt
 delete mode 100644 neurons/miners/wiki_agent/requirements.txt
 rename neurons/miner.py => prompting/base/prompting_miner.py (94%)
 create mode 100644 prompting/miners/__init__.py
 create mode 100644 prompting/miners/echo.py
 create mode 100644 prompting/miners/hf_miner.py
 create mode 100644 prompting/miners/mock.py
 create mode 100644 prompting/miners/openai_miner.py
 create mode 100644 prompting/miners/phrase.py
 rename neurons/miners/wiki_agent/agent.py => prompting/miners/wiki_agent.py (99%)
 create mode 100644 prompting/miners/wiki_agent_miner.py

diff --git a/neurons/miners/__init__.py b/neurons/miners/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/neurons/miners/huggingface/__init__.py b/neurons/miners/huggingface/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/neurons/miners/huggingface/miner.py b/neurons/miners/huggingface/miner.py
index aa6663eb..d452c0b1 100644
--- a/neurons/miners/huggingface/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -14,133 +14,9 @@
 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 # DEALINGS IN THE SOFTWARE.
-
 import time
-import torch
-import argparse
 import bittensor as bt
-
-# Bittensor Miner Template:
-import prompting
-from prompting.protocol import PromptingSynapse
-from prompting.llm import load_pipeline
-from prompting.llm import HuggingFaceLLM
-
-# import base miner class which takes care of most of the boilerplate
-from neurons.miner import Miner
-
-
-class HuggingFaceMiner(Miner):
-    """
-    Base 🤗 Hugging Face miner, integrated with the Hugging Face pipeline.
-    To run this miner from the project root directory:
-
-    python neurons/miners/huggingface/miner.py --wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> --neuron.model_id <model_id> --subtensor.network <network> --netuid <netuid> --axon.port <port> --axon.external_port <port> --logging.debug True --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
-    """
-    @classmethod
-    def add_args(cls, parser: argparse.ArgumentParser):
-        """
-        Adds arguments to the command line parser.
-        """
-        super().add_args(parser)
-
-    def __init__(self, config=None):
-        super().__init__(config=config)
-
-        model_kwargs = None
-        if self.config.neuron.load_in_8bit:
-            bt.logging.info("Loading 8 bit quantized model...")
-            model_kwargs = dict(
-                torch_dtype=torch.float16,
-                load_in_8bit=True,
-            )
-
-        if self.config.neuron.load_in_4bit:
-            bt.logging.info("Loading 4 bit quantized model...")
-            model_kwargs = dict(
-                torch_dtype=torch.float32,
-                load_in_4bit=True,
-            )
-
-        if self.config.wandb.on:
-            self.identity_tags = ("hf_miner", )
-
-            if self.config.neuron.load_in_8bit:
-                self.identity_tags += ("8bit_quantization", )            
-            elif self.config.neuron.load_in_4bit:
-                self.identity_tags += ("4bit_quantization", )
-        
-        # Forces model loading behaviour, overriding the mock flag
-        mock = False if self.config.neuron.should_force_model_loading else self.config.mock
-
-        self.llm_pipeline = load_pipeline(
-            model_id=self.config.neuron.model_id,            
-            device=self.device,
-            mock=mock,
-            model_kwargs=model_kwargs,
-        )        
-
-        self.model_id = self.config.neuron.model_id
-        self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
-
-    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
-        """
-        Processes the incoming synapse by performing a predefined operation on the input data.
-        This method should be replaced with actual logic relevant to the miner's purpose.
-
-        Args:
-            synapse (PromptingSynapse): The synapse object containing the 'dummy_input' data.
-
-        Returns:
-            PromptingSynapse: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value.
-
-        The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
-        the miner's intended operation. This method demonstrates a basic transformation of input data.
-        """
-
-        try:
-            t0 = time.time()
-            bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
-
-            prompt = synapse.messages[-1]
-            bt.logging.debug(f"💬 Querying {self.model_id}: {prompt}")
-
-            response = HuggingFaceLLM(
-                llm_pipeline=self.llm_pipeline,
-                system_prompt=self.system_prompt,
-                max_new_tokens=self.config.neuron.max_tokens,
-                do_sample=self.config.neuron.do_sample,
-                temperature=self.config.neuron.temperature,
-                top_k=self.config.neuron.top_k,
-                top_p=self.config.neuron.top_p,
-            ).query(
-                message=prompt,  # For now we just take the last message
-                role="user",
-                disregard_system_prompt=False,
-            )
-
-            synapse.completion = response
-            synapse_latency = time.time() - t0
-            
-            if self.config.wandb.on:
-                # TODO: Add system prompt to wandb config and not on every step
-                self.log_event(
-                    timing=synapse_latency,
-                    prompt=prompt,
-                    completion=response,
-                    system_prompt=self.system_prompt,
-                )
-
-            bt.logging.debug(f"✅ Served Response: {response}")
-            torch.cuda.empty_cache()
-
-        except Exception as e:
-            bt.logging.error(f"Error: {e}")
-            synapse.completion = "Error: " + str(e)
-        finally:             
-            if self.config.neuron.stop_on_forward_exception:
-                self.should_exit = True
-            return synapse
+from prompting.miners import HuggingFaceMiner
 
 
 # This is the main function, which runs the miner.
diff --git a/neurons/miners/openai/__init__.py b/neurons/miners/openai/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/neurons/miners/openai/miner.py b/neurons/miners/openai/miner.py
index 67350eba..a7d77b0d 100644
--- a/neurons/miners/openai/miner.py
+++ b/neurons/miners/openai/miner.py
@@ -14,142 +14,9 @@
 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 # DEALINGS IN THE SOFTWARE.
-
-import os
 import time
 import bittensor as bt
-import argparse
-# Bittensor Miner Template:
-import prompting
-from prompting.protocol import PromptingSynapse
-# import base miner class which takes care of most of the boilerplate
-from neurons.miner import Miner
-
-from langchain.prompts import ChatPromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-from langchain.chat_models import ChatOpenAI
-from dotenv import load_dotenv, find_dotenv
-from langchain.callbacks import get_openai_callback
-
-
-
-class OpenAIMiner(Miner):
-    """Langchain-based miner which uses OpenAI's API as the LLM.
-
-    You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
-    """
-    @classmethod
-    def add_args(cls, parser: argparse.ArgumentParser):
-        """
-        Adds OpenAI-specific arguments to the command line parser.
-        """
-        super().add_args(parser)
-
-
-    def __init__(self, config=None):
-        super().__init__(config=config)
-
-        bt.logging.info(f"Initializing with model {self.config.neuron.model_id}...")
-
-        if self.config.wandb.on:
-            self.identity_tags =  ("openai_miner", ) + (self.config.neuron.model_id, )
-        
-        _ = load_dotenv(find_dotenv()) 
-        api_key = os.environ.get("OPENAI_API_KEY")        
-
-        # Set openai key and other args
-        self.model = ChatOpenAI(
-            api_key=api_key,
-            model_name=self.config.neuron.model_id,
-            max_tokens = self.config.neuron.max_tokens,
-            temperature = self.config.neuron.temperature,            
-        )
-
-        self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
-        self.accumulated_total_tokens = 0
-        self.accumulated_prompt_tokens = 0
-        self.accumulated_completion_tokens = 0
-        self.accumulated_total_cost = 0
-
-    def get_cost_logging(self, cb):
-        bt.logging.info(f"Total Tokens: {cb.total_tokens}")
-        bt.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
-        bt.logging.info(f"Completion Tokens: {cb.completion_tokens}")
-        bt.logging.info(f"Total Cost (USD): ${round(cb.total_cost,4)}")
-
-        self.accumulated_total_tokens += cb.total_tokens
-        self.accumulated_prompt_tokens += cb.prompt_tokens
-        self.accumulated_completion_tokens += cb.completion_tokens
-        self.accumulated_total_cost += cb.total_cost
-
-        return  {
-            'total_tokens': cb.total_tokens,
-            'prompt_tokens': cb.prompt_tokens,
-            'completion_tokens': cb.completion_tokens,
-            'total_cost': cb.total_cost,
-            'accumulated_total_tokens': self.accumulated_total_tokens,
-            'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
-            'accumulated_completion_tokens': self.accumulated_completion_tokens,
-            'accumulated_total_cost': self.accumulated_total_cost,
-        }
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-        """
-        Processes the incoming synapse by performing a predefined operation on the input data.
-        This method should be replaced with actual logic relevant to the miner's purpose.
-
-        Args:
-            synapse (PromptingSynapse): The synapse object containing the 'dummy_input' data.
-
-        Returns:
-            PromptingSynapse: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value.
-
-        The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
-        the miner's intended operation. This method demonstrates a basic transformation of input data.
-        """
-        try:
-            with get_openai_callback() as cb:
-                t0 = time.time()
-                bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
-
-                prompt = ChatPromptTemplate.from_messages([
-                    ("system", self.system_prompt),
-                    ("user", "{input}")
-                ])
-                chain = prompt | self.model | StrOutputParser()
-
-                role = synapse.roles[-1]
-                message = synapse.messages[-1]
-                
-                bt.logging.debug(f"💬 Querying openai: {prompt}")
-                response = chain.invoke(
-                    {"role": role, "input": message}
-                )
-
-                synapse.completion = response
-                synapse_latency = time.time() - t0
-
-                if self.config.wandb.on:
-                    self.log_event(
-                        timing=synapse_latency, 
-                        prompt=message,
-                        completion=response,
-                        system_prompt=self.system_prompt,
-                        extra_info=self.get_cost_logging(cb)
-                    )
-
-            bt.logging.debug(f"✅ Served Response: {response}")
-            return synapse
-        except Exception as e:
-            bt.logging.error(f"Error in forward: {e}")
-            synapse.completion = "Error: " + str(e)
-        finally:
-            if self.config.neuron.stop_on_forward_exception:
-                self.should_exit = True
-            return synapse
-
+from prompting.miners import OpenAIMiner
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
diff --git a/neurons/miners/openai/requirements.txt b/neurons/miners/openai/requirements.txt
deleted file mode 100644
index 1436c237..00000000
--- a/neurons/miners/openai/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# TODO: Are we expecting that the miners should install the validator dependency first?
-# If so, we need to make it clear on the README. Otherwise, we should have a completely separated requirements for the miner
-openai==1.9.0
-langchain==0.1.0
-python-dotenv
\ No newline at end of file
diff --git a/neurons/miners/test/echo.py b/neurons/miners/test/echo.py
index 6a89c061..5313c648 100644
--- a/neurons/miners/test/echo.py
+++ b/neurons/miners/test/echo.py
@@ -14,47 +14,13 @@
 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 # DEALINGS IN THE SOFTWARE.
-
 import time
-import typing
 import bittensor as bt
-
-# Bittensor Miner Template:
-import prompting
-from prompting.protocol import PromptingSynapse
-
-# import base miner class which takes care of most of the boilerplate
-from neurons.miner import Miner
-
-
-class EchoMiner(Miner):
-    """
-    This little fella just repeats the last message it received.
-    """
-
-    def __init__(self, config=None):
-        super().__init__(config=config)
-
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-
-        synapse.completion = synapse.messages[-1]
-
-        return synapse
-
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
-        return False, 'All good here'
-
-    async def priority(self, synapse: PromptingSynapse) -> float:
-        return 1e6
+from prompting.miners import EchoMiner
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
     with EchoMiner() as miner:
         while True:
             bt.logging.info("Miner running...", time.time())
-            time.sleep(5)
+            time.sleep(5)
\ No newline at end of file
diff --git a/neurons/miners/test/mock.py b/neurons/miners/test/mock.py
index 753316b8..1701dfe8 100644
--- a/neurons/miners/test/mock.py
+++ b/neurons/miners/test/mock.py
@@ -14,43 +14,9 @@
 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 # DEALINGS IN THE SOFTWARE.
-
 import time
-import typing
 import bittensor as bt
-
-# Bittensor Miner Template:
-import prompting
-from prompting.protocol import PromptingSynapse
-
-# import base miner class which takes care of most of the boilerplate
-from neurons.miner import Miner
-
-
-class MockMiner(Miner):
-    """
-    This little fella responds with a static message.
-    """
-
-    def __init__(self, config=None):
-        super().__init__(config=config)
-
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-
-        synapse.completion = f'Hey you reached mock miner {self.config.wallet.hotkey!r}. Please leave a message after the tone.. Beep!'
-
-        return synapse
-
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
-        return False, 'All good here'
-
-    async def priority(self, synapse: PromptingSynapse) -> float:
-        return 1e6
+from prompting.miners import MockMiner
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
diff --git a/neurons/miners/test/phrase.py b/neurons/miners/test/phrase.py
index 39fcde4a..4fdce5a2 100644
--- a/neurons/miners/test/phrase.py
+++ b/neurons/miners/test/phrase.py
@@ -14,56 +14,9 @@
 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 # DEALINGS IN THE SOFTWARE.
-
 import time
-import typing
-import argparse
 import bittensor as bt
-
-# Bittensor Miner Template:
-import prompting
-from prompting.protocol import PromptingSynapse
-
-# import base miner class which takes care of most of the boilerplate
-from neurons.miner import Miner
-
-
-class PhraseMiner(Miner):
-    """
-    This little fella responds with whatever phrase you give it.
-    """
-
-    @classmethod
-    def add_args(cls, parser: argparse.ArgumentParser):
-        
-        super().add_args(parser)
-
-        parser.add_argument(
-            "--neuron.phrase",
-            type=str,
-            help="The phrase to use when running a phrase (test) miner.",
-            default="Can you please repeat that?",
-        )
-    
-    def __init__(self, config=None):
-        super().__init__(config=config)
-
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-
-        synapse.completion = self.config.neuron.phrase
-
-        return synapse
-
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
-        return False, 'All good here'
-
-    async def priority(self, synapse: PromptingSynapse) -> float:
-        return 1e6
+from prompting.miners import PhraseMiner
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
diff --git a/neurons/miners/wiki_agent/miner.py b/neurons/miners/wiki_agent/miner.py
index d2349c34..e9bfda27 100644
--- a/neurons/miners/wiki_agent/miner.py
+++ b/neurons/miners/wiki_agent/miner.py
@@ -14,120 +14,9 @@
 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 # DEALINGS IN THE SOFTWARE.
-
 import time
 import bittensor as bt
-import argparse
-# Bittensor Miner Template:
-from prompting.protocol import PromptingSynapse
-# import base miner class which takes care of most of the boilerplate
-from neurons.miner import Miner
-from dotenv import load_dotenv, find_dotenv
-from agent import WikiAgent
-from langchain.callbacks import get_openai_callback
-
-
-class WikipediaAgentMiner(Miner):
-    """Langchain-based miner which uses OpenAI's API as the LLM. This uses the ReAct framework.
-    
-    You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
-    """
-    @classmethod
-    def add_args(cls, parser: argparse.ArgumentParser):
-        """
-        Adds OpenAI-specific arguments to the command line parser.
-        """
-        super().add_args(parser)
-
-    def __init__(self, config=None):
-        super().__init__(config=config)
-        
-        bt.logging.info(f"🤖📖 Initializing wikipedia agent with model {self.config.neuron.model_id}...")
-
-        if self.config.wandb.on:
-            self.identity_tags = ("wikipedia_agent_miner", ) + (self.config.neuron.model_id, )
-        
-        _ = load_dotenv(find_dotenv()) 
-                
-        self.agent = WikiAgent(self.config.neuron.model_id, self.config.neuron.temperature)
-        self.accumulated_total_tokens = 0
-        self.accumulated_prompt_tokens = 0
-        self.accumulated_completion_tokens = 0
-        self.accumulated_total_cost = 0
-
-
-    def get_cost_logging(self, cb):
-        bt.logging.info(f"Total Tokens: {cb.total_tokens}")
-        bt.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
-        bt.logging.info(f"Completion Tokens: {cb.completion_tokens}")
-        bt.logging.info(f"Total Cost (USD): ${cb.total_cost}")
-
-        self.accumulated_total_tokens += cb.total_tokens
-        self.accumulated_prompt_tokens += cb.prompt_tokens
-        self.accumulated_completion_tokens += cb.completion_tokens
-        self.accumulated_total_cost += cb.total_cost
-
-        return  {
-            'total_tokens': cb.total_tokens,
-            'prompt_tokens': cb.prompt_tokens,
-            'completion_tokens': cb.completion_tokens,
-            'total_cost': cb.total_cost,
-            'accumulated_total_tokens': self.accumulated_total_tokens,
-            'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
-            'accumulated_completion_tokens': self.accumulated_completion_tokens,
-            'accumulated_total_cost': self.accumulated_total_cost,
-        }
-
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-        """
-        Processes the incoming synapse by performing a predefined operation on the input data.
-        This method should be replaced with actual logic relevant to the miner's purpose.
-
-        Args:
-            synapse (PromptingSynapse): The synapse object containing the 'dummy_input' data.
-
-        Returns:
-            PromptingSynapse: The synapse object with the '`dummy_output' field set to twice the 'dummy_input' value.
-
-        The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
-        the miner's intended operation. This method demonstrates a basic transformation of input data.
-        """
-        try:
-            with get_openai_callback() as cb:
-                t0 = time.time()
-                bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
-                            
-                message = synapse.messages[-1]
-                
-                bt.logging.debug(f"💬 Querying openai and wikipedia: {message}")
-                
-                response = self.agent.run(message)
-
-                synapse.completion = response
-                synapse_latency = time.time() - t0
-
-                if self.config.wandb.on:
-                    self.log_event(
-                        timing=synapse_latency, 
-                        prompt=message,
-                        completion=response,
-                        system_prompt='',
-                        extra_info=self.get_cost_logging(cb)
-                    )
-
-            bt.logging.debug(f"✅ Served Response: {response}")
-            return synapse
-        except Exception as e:
-            bt.logging.error(f"Error in forward: {e}")
-            synapse.completion = "Error: " + str(e)
-        finally:
-            if self.config.neuron.stop_on_forward_exception:
-                self.should_exit = True
-            return synapse
-
+from prompting.miners import WikipediaAgentMiner
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
diff --git a/neurons/miners/wiki_agent/requirements.txt b/neurons/miners/wiki_agent/requirements.txt
deleted file mode 100644
index 07f7b80e..00000000
--- a/neurons/miners/wiki_agent/requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# If so, we need to make it clear on the README. Otherwise, we should have a completely separated requirements for the miner
-openai==0.28
-langchain==0.1.0
-python-dotenv
-wikipedia
\ No newline at end of file
diff --git a/neurons/miner.py b/prompting/base/prompting_miner.py
similarity index 94%
rename from neurons/miner.py
rename to prompting/base/prompting_miner.py
index 1f9557db..e5abe691 100644
--- a/neurons/miner.py
+++ b/prompting/base/prompting_miner.py
@@ -25,7 +25,7 @@
 from prompting.base.miner import BaseMinerNeuron
 from datetime import datetime
 
-class Miner(BaseMinerNeuron):
+class BasePromptingMiner(BaseMinerNeuron):
     """
     Your miner neuron class. You should use this class to define your miner's behavior. In particular, you should replace the forward function with your own logic. You may also want to override the blacklist and priority functions according to your needs.
 
@@ -35,7 +35,7 @@ class Miner(BaseMinerNeuron):
     """
 
     def __init__(self, config=None):
-        super(Miner, self).__init__(config=config)                
+        super().__init__(config=config)                
         self.identity_tags = None
          
 
@@ -171,16 +171,4 @@ def log_event(self, timing: float, prompt: str, completion: str, system_prompt:
         }
 
         bt.logging.info('Logging event to wandb...', step_log)
-        wandb.log(step_log)
-
-
-# This is the main function, which runs the miner.
-if __name__ == "__main__":
-    with Miner() as miner:
-        while True:
-            bt.logging.info("Miner running...", time.time())
-            time.sleep(5)
-
-            if miner.should_exit:
-                bt.logging.warning("Ending miner...")
-                break
\ No newline at end of file
+        wandb.log(step_log)
\ No newline at end of file
diff --git a/prompting/miners/__init__.py b/prompting/miners/__init__.py
new file mode 100644
index 00000000..f0885316
--- /dev/null
+++ b/prompting/miners/__init__.py
@@ -0,0 +1,10 @@
+# Test miners
+from .echo import EchoMiner
+from .mock import MockMiner
+from .phrase import PhraseMiner
+
+# Real miners
+from .hf_miner import HuggingFaceMiner
+from .openai_miner import OpenAIMiner
+from .wiki_agent_miner import WikipediaAgentMiner
+
diff --git a/prompting/miners/echo.py b/prompting/miners/echo.py
new file mode 100644
index 00000000..9106729c
--- /dev/null
+++ b/prompting/miners/echo.py
@@ -0,0 +1,52 @@
+# The MIT License (MIT)
+# Copyright © 2024 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+import typing
+import bittensor as bt
+
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+
+
+class EchoMiner(BasePromptingMiner):
+    """
+    This little fella just repeats the last message it received.
+    """
+
+    def __init__(self, config=None):
+        super().__init__(config=config)
+
+
+    async def forward(
+        self, synapse: PromptingSynapse
+    ) -> PromptingSynapse:
+
+        synapse.completion = synapse.messages[-1]
+
+        bt.logging.success(f'✅ Echoing the message {synapse.completion}...')
+
+        return synapse
+
+    async def blacklist(
+        self, synapse: PromptingSynapse
+    ) -> typing.Tuple[bool, str]:
+        return False, 'All good here'
+
+    async def priority(self, synapse: PromptingSynapse) -> float:
+        return 1e6
diff --git a/prompting/miners/hf_miner.py b/prompting/miners/hf_miner.py
new file mode 100644
index 00000000..594a0879
--- /dev/null
+++ b/prompting/miners/hf_miner.py
@@ -0,0 +1,154 @@
+# The MIT License (MIT)
+# Copyright © 2024 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import time
+import torch
+import argparse
+import bittensor as bt
+
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+from prompting.llm import load_pipeline
+from prompting.llm import HuggingFaceLLM
+
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+
+
+class HuggingFaceMiner(BasePromptingMiner):
+    """
+    Base 🤗 Hugging Face miner, integrated with the Hugging Face pipeline.
+    To run this miner from the project root directory:
+
+    python neurons/miners/huggingface/miner.py --wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> --neuron.model_id <model_id> --subtensor.network <network> --netuid <netuid> --axon.port <port> --axon.external_port <port> --logging.debug True --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
+    """
+    @classmethod
+    def add_args(cls, parser: argparse.ArgumentParser):
+        """
+        Adds arguments to the command line parser.
+        """
+        super().add_args(parser)
+
+    def __init__(self, config=None):
+        super().__init__(config=config)
+
+        model_kwargs = None
+        if self.config.neuron.load_in_8bit:
+            bt.logging.info("Loading 8 bit quantized model...")
+            model_kwargs = dict(
+                torch_dtype=torch.float16,
+                load_in_8bit=True,
+            )
+
+        if self.config.neuron.load_in_4bit:
+            bt.logging.info("Loading 4 bit quantized model...")
+            model_kwargs = dict(
+                torch_dtype=torch.float32,
+                load_in_4bit=True,
+            )
+
+        if self.config.wandb.on:
+            self.identity_tags = ("hf_miner", )
+
+            if self.config.neuron.load_in_8bit:
+                self.identity_tags += ("8bit_quantization", )            
+            elif self.config.neuron.load_in_4bit:
+                self.identity_tags += ("4bit_quantization", )
+        
+        # Forces model loading behaviour, overriding the mock flag
+        mock = False if self.config.neuron.should_force_model_loading else self.config.mock
+
+        self.llm_pipeline = load_pipeline(
+            model_id=self.config.neuron.model_id,            
+            device=self.device,
+            mock=mock,
+            model_kwargs=model_kwargs,
+        )        
+
+        self.model_id = self.config.neuron.model_id
+        self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
+
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
+        """
+        Processes the incoming synapse by performing a predefined operation on the input data.
+        This method should be replaced with actual logic relevant to the miner's purpose.
+
+        Args:
+            synapse (PromptingSynapse): The synapse object containing the 'dummy_input' data.
+
+        Returns:
+            PromptingSynapse: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value.
+
+        The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
+        the miner's intended operation. This method demonstrates a basic transformation of input data.
+        """
+
+        try:
+            t0 = time.time()
+            bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+
+            prompt = synapse.messages[-1]
+            bt.logging.debug(f"💬 Querying {self.model_id}: {prompt}")
+
+            response = HuggingFaceLLM(
+                llm_pipeline=self.llm_pipeline,
+                system_prompt=self.system_prompt,
+                max_new_tokens=self.config.neuron.max_tokens,
+                do_sample=self.config.neuron.do_sample,
+                temperature=self.config.neuron.temperature,
+                top_k=self.config.neuron.top_k,
+                top_p=self.config.neuron.top_p,
+            ).query(
+                message=prompt,  # For now we just take the last message
+                role="user",
+                disregard_system_prompt=False,
+            )
+
+            synapse.completion = response
+            synapse_latency = time.time() - t0
+            
+            if self.config.wandb.on:
+                # TODO: Add system prompt to wandb config and not on every step
+                self.log_event(
+                    timing=synapse_latency,
+                    prompt=prompt,
+                    completion=response,
+                    system_prompt=self.system_prompt,
+                )
+
+            bt.logging.debug(f"✅ Served Response: {response}")
+            torch.cuda.empty_cache()
+
+        except Exception as e:
+            bt.logging.error(f"Error: {e}")
+            synapse.completion = "Error: " + str(e)
+        finally:             
+            if self.config.neuron.stop_on_forward_exception:
+                self.should_exit = True
+            return synapse
+
+
+# This is the main function, which runs the miner.
+if __name__ == "__main__":
+    with HuggingFaceMiner() as miner:
+        while True:
+            bt.logging.info("Miner running...", time.time())
+            time.sleep(5)
+
+            if miner.should_exit:
+                bt.logging.warning("Ending miner...")
+                break   
diff --git a/prompting/miners/mock.py b/prompting/miners/mock.py
new file mode 100644
index 00000000..b6dd0f32
--- /dev/null
+++ b/prompting/miners/mock.py
@@ -0,0 +1,51 @@
+# The MIT License (MIT)
+# Copyright © 2024 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+import typing
+import bittensor as bt
+
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+
+
+class MockMiner(BasePromptingMiner):
+    """
+    This little fella responds with a static message.
+    """
+
+    def __init__(self, config=None):
+        super().__init__(config=config)
+
+
+    async def forward(
+        self, synapse: PromptingSynapse
+    ) -> PromptingSynapse:
+
+        synapse.completion = f'Hey you reached mock miner {self.config.wallet.hotkey!r}. Please leave a message after the tone.. Beep!'
+        bt.logging.success(f"✅ Mock miner replied with {synapse.completion}")
+
+        return synapse
+
+    async def blacklist(
+        self, synapse: PromptingSynapse
+    ) -> typing.Tuple[bool, str]:
+        return False, 'All good here'
+
+    async def priority(self, synapse: PromptingSynapse) -> float:
+        return 1e6
\ No newline at end of file
diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
new file mode 100644
index 00000000..2e0b92a9
--- /dev/null
+++ b/prompting/miners/openai_miner.py
@@ -0,0 +1,150 @@
+# The MIT License (MIT)
+# Copyright © 2024 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import os
+import time
+import bittensor as bt
+import argparse
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+
+from langchain.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+from langchain.chat_models import ChatOpenAI
+from dotenv import load_dotenv, find_dotenv
+from langchain.callbacks import get_openai_callback
+
+
+
+class OpenAIMiner(BasePromptingMiner):
+    """Langchain-based miner which uses OpenAI's API as the LLM.
+
+    You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
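+
+    Example invocation (flags assumed from this repo's other miners; adjust for your wallet and network):
+    python neurons/miners/openai/miner.py --wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> --neuron.model_id gpt-3.5-turbo
+    An OPENAI_API_KEY must be available in the environment or in a .env file.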
+    """
+    @classmethod
+    def add_args(cls, parser: argparse.ArgumentParser):
+        """
+        Adds OpenAI-specific arguments to the command line parser.
+        """
+        super().add_args(parser)
+
+
+    def __init__(self, config=None):
+        super().__init__(config=config)
+
+        bt.logging.info(f"Initializing with model {self.config.neuron.model_id}...")
+
+        if self.config.wandb.on:
+            self.identity_tags =  ("openai_miner", ) + (self.config.neuron.model_id, )
+        
+        _ = load_dotenv(find_dotenv()) 
+        api_key = os.environ.get("OPENAI_API_KEY")        
+
+        # Set openai key and other args
+        self.model = ChatOpenAI(
+            api_key=api_key,
+            model_name=self.config.neuron.model_id,
+            max_tokens = self.config.neuron.max_tokens,
+            temperature = self.config.neuron.temperature,            
+        )
+
+        self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
+        self.accumulated_total_tokens = 0
+        self.accumulated_prompt_tokens = 0
+        self.accumulated_completion_tokens = 0
+        self.accumulated_total_cost = 0
+
+    def get_cost_logging(self, cb):
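+        """Logs token usage and cost from the OpenAI callback and returns them along with running totals."""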
+        bt.logging.info(f"Total Tokens: {cb.total_tokens}")
+        bt.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
+        bt.logging.info(f"Completion Tokens: {cb.completion_tokens}")
+        bt.logging.info(f"Total Cost (USD): ${round(cb.total_cost,4)}")
+
+        self.accumulated_total_tokens += cb.total_tokens
+        self.accumulated_prompt_tokens += cb.prompt_tokens
+        self.accumulated_completion_tokens += cb.completion_tokens
+        self.accumulated_total_cost += cb.total_cost
+
+        return  {
+            'total_tokens': cb.total_tokens,
+            'prompt_tokens': cb.prompt_tokens,
+            'completion_tokens': cb.completion_tokens,
+            'total_cost': cb.total_cost,
+            'accumulated_total_tokens': self.accumulated_total_tokens,
+            'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
+            'accumulated_completion_tokens': self.accumulated_completion_tokens,
+            'accumulated_total_cost': self.accumulated_total_cost,
+        }
+
+    async def forward(
+        self, synapse: PromptingSynapse
+    ) -> PromptingSynapse:
+        """
+        Processes the incoming synapse by querying the OpenAI API and returning the model's
+        completion to the requesting validator.
+
+        Args:
+            synapse (PromptingSynapse): The synapse object containing the conversation roles and messages.
+
+        Returns:
+            PromptingSynapse: The synapse object with the 'completion' field set to the model's response.
+
+        On failure the error message is written to 'completion', and the miner optionally shuts down
+        if 'neuron.stop_on_forward_exception' is set.
+        """
+        try:
+            with get_openai_callback() as cb:
+                t0 = time.time()
+                bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+
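+                # Build the LCEL chain: prompt template -> chat model -> plain-string parser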
+                prompt = ChatPromptTemplate.from_messages([
+                    ("system", self.system_prompt),
+                    ("user", "{input}")
+                ])
+                chain = prompt | self.model | StrOutputParser()
+
+                role = synapse.roles[-1]
+                message = synapse.messages[-1]
+                
+                bt.logging.debug(f"💬 Querying openai: {prompt}")
+                response = chain.invoke(
+                    {"role": role, "input": message}
+                )
+
+                synapse.completion = response
+                synapse_latency = time.time() - t0
+
+                if self.config.wandb.on:
+                    self.log_event(
+                        timing=synapse_latency, 
+                        prompt=message,
+                        completion=response,
+                        system_prompt=self.system_prompt,
+                        extra_info=self.get_cost_logging(cb)
+                    )
+
+            bt.logging.debug(f"✅ Served Response: {response}")
+            return synapse
+        except Exception as e:
+            bt.logging.error(f"Error in forward: {e}")
+            synapse.completion = "Error: " + str(e)
+        finally:
+            if self.config.neuron.stop_on_forward_exception:
+                self.should_exit = True
+            return synapse
\ No newline at end of file
diff --git a/prompting/miners/phrase.py b/prompting/miners/phrase.py
new file mode 100644
index 00000000..a7f6993d
--- /dev/null
+++ b/prompting/miners/phrase.py
@@ -0,0 +1,64 @@
+# The MIT License (MIT)
+# Copyright © 2024 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+import typing
+import argparse
+import bittensor as bt
+
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+
+
+class PhraseMiner(BasePromptingMiner):
+    """
+    This little fella responds with whatever phrase you give it.
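+    Set the reply with --neuron.phrase, e.g. --neuron.phrase "Can you please repeat that?".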
+    """
+
+    @classmethod
+    def add_args(cls, parser: argparse.ArgumentParser):
+        
+        super().add_args(parser)
+
+        parser.add_argument(
+            "--neuron.phrase",
+            type=str,
+            help="The phrase to use when running a phrase (test) miner.",
+            default="Can you please repeat that?",
+        )
+    
+    def __init__(self, config=None):
+        super().__init__(config=config)
+
+
+    async def forward(
+        self, synapse: PromptingSynapse
+    ) -> PromptingSynapse:
+
+        synapse.completion = self.config.neuron.phrase
+        bt.logging.success(f"✅ Phrase miner replied with {synapse.completion}")
+
+        return synapse
+
+    async def blacklist(
+        self, synapse: PromptingSynapse
+    ) -> typing.Tuple[bool, str]:
+        return False, 'All good here'
+
+    async def priority(self, synapse: PromptingSynapse) -> float:
+        return 1e6
diff --git a/neurons/miners/wiki_agent/agent.py b/prompting/miners/wiki_agent.py
similarity index 99%
rename from neurons/miners/wiki_agent/agent.py
rename to prompting/miners/wiki_agent.py
index c89370bd..062c17d4 100644
--- a/neurons/miners/wiki_agent/agent.py
+++ b/prompting/miners/wiki_agent.py
@@ -56,8 +56,6 @@ def format(self, **kwargs) -> str:
         # Create a list of tool names for the tools provided
         kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
         return self.template.format(**kwargs)
-    
-
 
 
 class CustomOutputParser(AgentOutputParser):
@@ -81,8 +79,6 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
         return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
 
 
-
-
 class WikiAgent:
     def __init__(self, model_id: str, model_temperature: float):
         self.wikipedia = WikipediaAPIWrapper()
@@ -93,7 +89,6 @@ def __init__(self, model_id: str, model_temperature: float):
                 description="Useful for when you need to look up a topic, country or person on wikipedia"
         )]
 
-
         prompt = CustomPromptTemplate(
             template=template,
             tools=tools,
@@ -117,7 +112,7 @@ def __init__(self, model_id: str, model_temperature: float):
         )
 
         self.agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
-        
+
 
     def run(self, input: str) -> str:
         return self.agent_executor.run(input)
\ No newline at end of file
diff --git a/prompting/miners/wiki_agent_miner.py b/prompting/miners/wiki_agent_miner.py
new file mode 100644
index 00000000..df6d76cf
--- /dev/null
+++ b/prompting/miners/wiki_agent_miner.py
@@ -0,0 +1,129 @@
+# The MIT License (MIT)
+# Copyright © 2024 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import time
+import bittensor as bt
+import argparse
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+from dotenv import load_dotenv, find_dotenv
+from prompting.miners.wiki_agent import WikiAgent
+from langchain.callbacks import get_openai_callback
+
+
+class WikipediaAgentMiner(BasePromptingMiner):
+    """Langchain-based miner which uses OpenAI's API as the LLM. This uses the ReAct framework.
+    
+    You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
+    """
+    @classmethod
+    def add_args(cls, parser: argparse.ArgumentParser):
+        """
+        Adds OpenAI-specific arguments to the command line parser.
+        """
+        super().add_args(parser)
+
+    def __init__(self, config=None):
+        super().__init__(config=config)
+        
+        bt.logging.info(f"🤖📖 Initializing wikipedia agent with model {self.config.neuron.model_id}...")
+
+        if self.config.wandb.on:
+            self.identity_tags = ("wikipedia_agent_miner", ) + (self.config.neuron.model_id, )
+        
+        _ = load_dotenv(find_dotenv()) 
+                
+        self.agent = WikiAgent(self.config.neuron.model_id, self.config.neuron.temperature)
+        self.accumulated_total_tokens = 0
+        self.accumulated_prompt_tokens = 0
+        self.accumulated_completion_tokens = 0
+        self.accumulated_total_cost = 0
+
+
+    def get_cost_logging(self, cb):
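+        """Logs token usage and cost from the OpenAI callback and returns them along with running totals."""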
+        bt.logging.info(f"Total Tokens: {cb.total_tokens}")
+        bt.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
+        bt.logging.info(f"Completion Tokens: {cb.completion_tokens}")
+        bt.logging.info(f"Total Cost (USD): ${cb.total_cost}")
+
+        self.accumulated_total_tokens += cb.total_tokens
+        self.accumulated_prompt_tokens += cb.prompt_tokens
+        self.accumulated_completion_tokens += cb.completion_tokens
+        self.accumulated_total_cost += cb.total_cost
+
+        return  {
+            'total_tokens': cb.total_tokens,
+            'prompt_tokens': cb.prompt_tokens,
+            'completion_tokens': cb.completion_tokens,
+            'total_cost': cb.total_cost,
+            'accumulated_total_tokens': self.accumulated_total_tokens,
+            'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
+            'accumulated_completion_tokens': self.accumulated_completion_tokens,
+            'accumulated_total_cost': self.accumulated_total_cost,
+        }
+
+
+    async def forward(
+        self, synapse: PromptingSynapse
+    ) -> PromptingSynapse:
+        """
+        Processes the incoming synapse by running the Wikipedia agent on the latest message
+        and returning the agent's answer to the requesting validator.
+
+        Args:
+            synapse (PromptingSynapse): The synapse object containing the conversation messages.
+
+        Returns:
+            PromptingSynapse: The synapse object with the 'completion' field set to the agent's response.
+
+        On failure the error message is written to 'completion', and the miner optionally shuts down
+        if 'neuron.stop_on_forward_exception' is set.
+        """
+        try:
+            with get_openai_callback() as cb:
+                t0 = time.time()
+                bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+                            
+                message = synapse.messages[-1]
+                
+                bt.logging.debug(f"💬 Querying openai and wikipedia: {message}")
+                
+                response = self.agent.run(message)
+
+                synapse.completion = response
+                synapse_latency = time.time() - t0
+
+                if self.config.wandb.on:
+                    self.log_event(
+                        timing=synapse_latency, 
+                        prompt=message,
+                        completion=response,
+                        system_prompt='',
+                        extra_info=self.get_cost_logging(cb)
+                    )
+
+            bt.logging.debug(f"✅ Served Response: {response}")
+            return synapse
+        except Exception as e:
+            bt.logging.error(f"Error in forward: {e}")
+            synapse.completion = "Error: " + str(e)
+        finally:
+            if self.config.neuron.stop_on_forward_exception:
+                self.should_exit = True
+            return synapse
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 06d47650..516b018e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,3 +16,6 @@ wandb==0.15.10
 tenacity
 antlr4-python3-runtime==4.11
 wikipedia
+openai==1.9.0
+langchain==0.1.0
+python-dotenv

From c2d40f6f7206f2158d403f8a6588cd7789f34d1b Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Wed, 31 Jan 2024 16:54:36 +0000
Subject: [PATCH 08/34] runs black on miners code

---
 neurons/miners/huggingface/miner.py  |  2 +-
 neurons/miners/openai/miner.py       |  2 +-
 neurons/miners/test/echo.py          |  2 +-
 prompting/miners/__init__.py         |  1 -
 prompting/miners/echo.py             | 14 ++-----
 prompting/miners/hf_miner.py         | 27 ++++++------
 prompting/miners/mock.py             | 16 +++----
 prompting/miners/openai_miner.py     | 58 ++++++++++++-------------
 prompting/miners/phrase.py           | 15 ++-----
 prompting/miners/wiki_agent.py       | 49 +++++++++++++++-------
 prompting/miners/wiki_agent_miner.py | 63 +++++++++++++++-------------
 11 files changed, 125 insertions(+), 124 deletions(-)

diff --git a/neurons/miners/huggingface/miner.py b/neurons/miners/huggingface/miner.py
index d452c0b1..d0c65891 100644
--- a/neurons/miners/huggingface/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -28,4 +28,4 @@
 
             if miner.should_exit:
                 bt.logging.warning("Ending miner...")
-                break   
+                break
diff --git a/neurons/miners/openai/miner.py b/neurons/miners/openai/miner.py
index a7d77b0d..158c6beb 100644
--- a/neurons/miners/openai/miner.py
+++ b/neurons/miners/openai/miner.py
@@ -27,4 +27,4 @@
 
             if miner.should_exit:
                 bt.logging.warning("Ending miner...")
-                break   
+                break
diff --git a/neurons/miners/test/echo.py b/neurons/miners/test/echo.py
index 5313c648..a94e755e 100644
--- a/neurons/miners/test/echo.py
+++ b/neurons/miners/test/echo.py
@@ -23,4 +23,4 @@
     with EchoMiner() as miner:
         while True:
             bt.logging.info("Miner running...", time.time())
-            time.sleep(5)
\ No newline at end of file
+            time.sleep(5)
diff --git a/prompting/miners/__init__.py b/prompting/miners/__init__.py
index f0885316..ec76d2d9 100644
--- a/prompting/miners/__init__.py
+++ b/prompting/miners/__init__.py
@@ -7,4 +7,3 @@
 from .hf_miner import HuggingFaceMiner
 from .openai_miner import OpenAIMiner
 from .wiki_agent_miner import WikipediaAgentMiner
-
diff --git a/prompting/miners/echo.py b/prompting/miners/echo.py
index 9106729c..e534b46e 100644
--- a/prompting/miners/echo.py
+++ b/prompting/miners/echo.py
@@ -32,21 +32,15 @@ class EchoMiner(BasePromptingMiner):
     def __init__(self, config=None):
         super().__init__(config=config)
 
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
         synapse.completion = synapse.messages[-1]
 
-        bt.logging.success(f'✅ Echoing the message {synapse.completion}...')
+        bt.logging.success(f"✅ Echoing the message {synapse.completion}...")
 
         return synapse
 
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
-        return False, 'All good here'
+    async def blacklist(self, synapse: PromptingSynapse) -> typing.Tuple[bool, str]:
+        return False, "All good here"
 
     async def priority(self, synapse: PromptingSynapse) -> float:
         return 1e6
diff --git a/prompting/miners/hf_miner.py b/prompting/miners/hf_miner.py
index 594a0879..c7ae016d 100644
--- a/prompting/miners/hf_miner.py
+++ b/prompting/miners/hf_miner.py
@@ -31,11 +31,12 @@
 
 class HuggingFaceMiner(BasePromptingMiner):
     """
-    Base 🤗 Hugging Face miner, integrated with hf pipeline.    
+    Base 🤗 Hugging Face miner, integrated with hf pipeline.
     To run this miner from the project root directory:
 
     python neurons/miners/huggingface/miner.py --wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> --neuron.model_id <model_id> --subtensor.network <network> --netuid <netuid> --axon.port <port> --axon.external_port <port> --logging.debug True --neuron.model_id HuggingFaceH4/zephyr-7b-beta --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
     """
+
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
         """
@@ -62,22 +63,24 @@ def __init__(self, config=None):
             )
 
         if self.config.wandb.on:
-            self.identity_tags = ("hf_miner", )
+            self.identity_tags = ("hf_miner",)
 
             if self.config.neuron.load_in_8bit:
-                self.identity_tags += ("8bit_quantization", )            
+                self.identity_tags += ("8bit_quantization",)
             elif self.config.neuron.load_in_4bit:
-                self.identity_tags += ("4bit_quantization", )
-        
-        # Forces model loading behaviour over mock flag 
-        mock = False if self.config.neuron.should_force_model_loading else self.config.mock
+                self.identity_tags += ("4bit_quantization",)
+
+        # Forces model loading behaviour over mock flag
+        mock = (
+            False if self.config.neuron.should_force_model_loading else self.config.mock
+        )
 
         self.llm_pipeline = load_pipeline(
-            model_id=self.config.neuron.model_id,            
+            model_id=self.config.neuron.model_id,
             device=self.device,
             mock=mock,
             model_kwargs=model_kwargs,
-        )        
+        )
 
         self.model_id = self.config.neuron.model_id
         self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
@@ -120,7 +123,7 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
 
             synapse.completion = response
             synapse_latency = time.time() - t0
-            
+
             if self.config.wandb.on:
                 # TODO: Add system prompt to wandb config and not on every step
                 self.log_event(
@@ -136,7 +139,7 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
         except Exception as e:
             bt.logging.error(f"Error: {e}")
             synapse.completion = "Error: " + str(e)
-        finally:             
+        finally:
             if self.config.neuron.stop_on_forward_exception:
                 self.should_exit = True
             return synapse
@@ -151,4 +154,4 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
 
             if miner.should_exit:
                 bt.logging.warning("Ending miner...")
-                break   
+                break
diff --git a/prompting/miners/mock.py b/prompting/miners/mock.py
index b6dd0f32..25b33da3 100644
--- a/prompting/miners/mock.py
+++ b/prompting/miners/mock.py
@@ -32,20 +32,14 @@ class MockMiner(BasePromptingMiner):
     def __init__(self, config=None):
         super().__init__(config=config)
 
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-
-        synapse.completion = f'Hey you reached mock miner {self.config.wallet.hotkey!r}. Please leave a message after the tone.. Beep!'
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
+        synapse.completion = f"Hey you reached mock miner {self.config.wallet.hotkey!r}. Please leave a message after the tone.. Beep!"
         bt.logging.success(f"✅ Mock miner replied with {synapse.completion}")
 
         return synapse
 
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
-        return False, 'All good here'
+    async def blacklist(self, synapse: PromptingSynapse) -> typing.Tuple[bool, str]:
+        return False, "All good here"
 
     async def priority(self, synapse: PromptingSynapse) -> float:
-        return 1e6
\ No newline at end of file
+        return 1e6
diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
index 2e0b92a9..aa789b52 100644
--- a/prompting/miners/openai_miner.py
+++ b/prompting/miners/openai_miner.py
@@ -19,8 +19,10 @@
 import time
 import bittensor as bt
 import argparse
+
 # Bittensor Miner Template:
 from prompting.protocol import PromptingSynapse
+
 # import base miner class which takes care of most of the boilerplate
 from prompting.base.prompting_miner import BasePromptingMiner
 
@@ -31,12 +33,12 @@
 from langchain.callbacks import get_openai_callback
 
 
-
 class OpenAIMiner(BasePromptingMiner):
     """Langchain-based miner which uses OpenAI's API as the LLM.
 
     You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
     """
+
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
         """
@@ -44,24 +46,23 @@ def add_args(cls, parser: argparse.ArgumentParser):
         """
         super().add_args(parser)
 
-
     def __init__(self, config=None):
         super().__init__(config=config)
 
         bt.logging.info(f"Initializing with model {self.config.neuron.model_id}...")
 
         if self.config.wandb.on:
-            self.identity_tags =  ("openai_miner", ) + (self.config.neuron.model_id, )
-        
-        _ = load_dotenv(find_dotenv()) 
-        api_key = os.environ.get("OPENAI_API_KEY")        
+            self.identity_tags = ("openai_miner",) + (self.config.neuron.model_id,)
+
+        _ = load_dotenv(find_dotenv())
+        api_key = os.environ.get("OPENAI_API_KEY")
 
         # Set openai key and other args
         self.model = ChatOpenAI(
             api_key=api_key,
             model_name=self.config.neuron.model_id,
-            max_tokens = self.config.neuron.max_tokens,
-            temperature = self.config.neuron.temperature,            
+            max_tokens=self.config.neuron.max_tokens,
+            temperature=self.config.neuron.temperature,
         )
 
         self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
@@ -81,20 +82,18 @@ def get_cost_logging(self, cb):
         self.accumulated_completion_tokens += cb.completion_tokens
         self.accumulated_total_cost += cb.total_cost
 
-        return  {
-            'total_tokens': cb.total_tokens,
-            'prompt_tokens': cb.prompt_tokens,
-            'completion_tokens': cb.completion_tokens,
-            'total_cost': cb.total_cost,
-            'accumulated_total_tokens': self.accumulated_total_tokens,
-            'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
-            'accumulated_completion_tokens': self.accumulated_completion_tokens,
-            'accumulated_total_cost': self.accumulated_total_cost,
+        return {
+            "total_tokens": cb.total_tokens,
+            "prompt_tokens": cb.prompt_tokens,
+            "completion_tokens": cb.completion_tokens,
+            "total_cost": cb.total_cost,
+            "accumulated_total_tokens": self.accumulated_total_tokens,
+            "accumulated_prompt_tokens": self.accumulated_prompt_tokens,
+            "accumulated_completion_tokens": self.accumulated_completion_tokens,
+            "accumulated_total_cost": self.accumulated_total_cost,
         }
 
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
         """
         Processes the incoming synapse by performing a predefined operation on the input data.
         This method should be replaced with actual logic relevant to the miner's purpose.
@@ -113,30 +112,27 @@ async def forward(
                 t0 = time.time()
                 bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
 
-                prompt = ChatPromptTemplate.from_messages([
-                    ("system", self.system_prompt),
-                    ("user", "{input}")
-                ])
+                prompt = ChatPromptTemplate.from_messages(
+                    [("system", self.system_prompt), ("user", "{input}")]
+                )
                 chain = prompt | self.model | StrOutputParser()
 
                 role = synapse.roles[-1]
                 message = synapse.messages[-1]
-                
+
                 bt.logging.debug(f"💬 Querying openai: {prompt}")
-                response = chain.invoke(
-                    {"role": role, "input": message}
-                )
+                response = chain.invoke({"role": role, "input": message})
 
                 synapse.completion = response
                 synapse_latency = time.time() - t0
 
                 if self.config.wandb.on:
                     self.log_event(
-                        timing=synapse_latency, 
+                        timing=synapse_latency,
                         prompt=message,
                         completion=response,
                         system_prompt=self.system_prompt,
-                        extra_info=self.get_cost_logging(cb)
+                        extra_info=self.get_cost_logging(cb),
                     )
 
             bt.logging.debug(f"✅ Served Response: {response}")
@@ -147,4 +143,4 @@ async def forward(
         finally:
             if self.config.neuron.stop_on_forward_exception:
                 self.should_exit = True
-            return synapse
\ No newline at end of file
+            return synapse
diff --git a/prompting/miners/phrase.py b/prompting/miners/phrase.py
index a7f6993d..8d4cbc1d 100644
--- a/prompting/miners/phrase.py
+++ b/prompting/miners/phrase.py
@@ -32,7 +32,6 @@ class PhraseMiner(BasePromptingMiner):
 
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
-        
         super().add_args(parser)
 
         parser.add_argument(
@@ -41,24 +40,18 @@ def add_args(cls, parser: argparse.ArgumentParser):
             help="The phrase to use when running a phrase (test) miner.",
             default="Can you please repeat that?",
         )
-    
+
     def __init__(self, config=None):
         super().__init__(config=config)
 
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
-
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
         synapse.completion = self.config.neuron.phrase
         bt.logging.success(f"✅ Phrase miner replied with {synapse.completion}")
 
         return synapse
 
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
-        return False, 'All good here'
+    async def blacklist(self, synapse: PromptingSynapse) -> typing.Tuple[bool, str]:
+        return False, "All good here"
 
     async def priority(self, synapse: PromptingSynapse) -> float:
         return 1e6
diff --git a/prompting/miners/wiki_agent.py b/prompting/miners/wiki_agent.py
index 062c17d4..5ea1485b 100644
--- a/prompting/miners/wiki_agent.py
+++ b/prompting/miners/wiki_agent.py
@@ -1,6 +1,11 @@
 from langchain.utilities import WikipediaAPIWrapper
 from langchain.agents import Tool
-from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
+from langchain.agents import (
+    Tool,
+    AgentExecutor,
+    LLMSingleActionAgent,
+    AgentOutputParser,
+)
 from langchain.schema import AgentAction, AgentFinish, OutputParserException
 import re
 import bittensor as bt
@@ -11,7 +16,12 @@
 from langchain.agents import Tool
 from langchain.agents import initialize_agent
 from langchain.chains import LLMChain
-from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
+from langchain.agents import (
+    Tool,
+    AgentExecutor,
+    LLMSingleActionAgent,
+    AgentOutputParser,
+)
 
 
 # Set up the base template
@@ -52,7 +62,9 @@ def format(self, **kwargs) -> str:
         # Set the agent_scratchpad variable to that value
         kwargs["agent_scratchpad"] = thoughts
         # Create a tools variable from the list of tools provided
-        kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
+        kwargs["tools"] = "\n".join(
+            [f"{tool.name}: {tool.description}" for tool in self.tools]
+        )
         # Create a list of tool names for the tools provided
         kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
         return self.template.format(**kwargs)
@@ -76,31 +88,35 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
         action = match.group(1).strip()
         action_input = match.group(2)
         # Return the action and action input
-        return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
+        return AgentAction(
+            tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
+        )
 
 
 class WikiAgent:
     def __init__(self, model_id: str, model_temperature: float):
         self.wikipedia = WikipediaAPIWrapper()
-        tools = [    
+        tools = [
             Tool(
-                name='wikipedia',
-                func= self.wikipedia.run,
-                description="Useful for when you need to look up a topic, country or person on wikipedia"
-        )]
+                name="wikipedia",
+                func=self.wikipedia.run,
+                description="Useful for when you need to look up a topic, country or person on wikipedia",
+            )
+        ]
 
         prompt = CustomPromptTemplate(
             template=template,
             tools=tools,
             # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
             # This includes the `intermediate_steps` variable because that is needed
-            input_variables=["input", "intermediate_steps"]
+            input_variables=["input", "intermediate_steps"],
         )
 
-        bt.logging.info(f"Initializing agent with model_id: {model_id} and model_temperature: {model_temperature}")
+        bt.logging.info(
+            f"Initializing agent with model_id: {model_id} and model_temperature: {model_temperature}"
+        )
         llm = OpenAI(model_name=model_id, temperature=model_temperature)
 
-
         llm_chain = LLMChain(llm=llm, prompt=prompt)
         output_parser = CustomOutputParser()
 
@@ -108,11 +124,12 @@ def __init__(self, model_id: str, model_temperature: float):
             llm_chain=llm_chain,
             output_parser=output_parser,
             stop=["\nObservation:"],
-            allowed_tools=tools,            
+            allowed_tools=tools,
         )
 
-        self.agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
-
+        self.agent_executor = AgentExecutor.from_agent_and_tools(
+            agent=agent, tools=tools, verbose=True, handle_parsing_errors=True
+        )
 
     def run(self, input: str) -> str:
-        return self.agent_executor.run(input)
\ No newline at end of file
+        return self.agent_executor.run(input)
diff --git a/prompting/miners/wiki_agent_miner.py b/prompting/miners/wiki_agent_miner.py
index df6d76cf..95f61196 100644
--- a/prompting/miners/wiki_agent_miner.py
+++ b/prompting/miners/wiki_agent_miner.py
@@ -18,8 +18,10 @@
 import time
 import bittensor as bt
 import argparse
+
 # Bittensor Miner Template:
 from prompting.protocol import PromptingSynapse
+
 # import base miner class which takes care of most of the boilerplate
 from prompting.base.prompting_miner import BasePromptingMiner
 from dotenv import load_dotenv, find_dotenv
@@ -29,9 +31,10 @@
 
 class WikipediaAgentMiner(BasePromptingMiner):
     """Langchain-based miner which uses OpenAI's API as the LLM. This uses the ReAct framework.
-    
+
     You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
     """
+
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
         """
@@ -41,21 +44,26 @@ def add_args(cls, parser: argparse.ArgumentParser):
 
     def __init__(self, config=None):
         super().__init__(config=config)
-        
-        bt.logging.info(f"🤖📖 Initializing wikipedia agent with model {self.config.neuron.model_id}...")
+
+        bt.logging.info(
+            f"🤖📖 Initializing wikipedia agent with model {self.config.neuron.model_id}..."
+        )
 
         if self.config.wandb.on:
-            self.identity_tags = ("wikipedia_agent_miner", ) + (self.config.neuron.model_id, )
-        
-        _ = load_dotenv(find_dotenv()) 
-                
-        self.agent = WikiAgent(self.config.neuron.model_id, self.config.neuron.temperature)
+            self.identity_tags = ("wikipedia_agent_miner",) + (
+                self.config.neuron.model_id,
+            )
+
+        _ = load_dotenv(find_dotenv())
+
+        self.agent = WikiAgent(
+            self.config.neuron.model_id, self.config.neuron.temperature
+        )
         self.accumulated_total_tokens = 0
         self.accumulated_prompt_tokens = 0
         self.accumulated_completion_tokens = 0
         self.accumulated_total_cost = 0
 
-
     def get_cost_logging(self, cb):
         bt.logging.info(f"Total Tokens: {cb.total_tokens}")
         bt.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
@@ -67,21 +75,18 @@ def get_cost_logging(self, cb):
         self.accumulated_completion_tokens += cb.completion_tokens
         self.accumulated_total_cost += cb.total_cost
 
-        return  {
-            'total_tokens': cb.total_tokens,
-            'prompt_tokens': cb.prompt_tokens,
-            'completion_tokens': cb.completion_tokens,
-            'total_cost': cb.total_cost,
-            'accumulated_total_tokens': self.accumulated_total_tokens,
-            'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
-            'accumulated_completion_tokens': self.accumulated_completion_tokens,
-            'accumulated_total_cost': self.accumulated_total_cost,
+        return {
+            "total_tokens": cb.total_tokens,
+            "prompt_tokens": cb.prompt_tokens,
+            "completion_tokens": cb.completion_tokens,
+            "total_cost": cb.total_cost,
+            "accumulated_total_tokens": self.accumulated_total_tokens,
+            "accumulated_prompt_tokens": self.accumulated_prompt_tokens,
+            "accumulated_completion_tokens": self.accumulated_completion_tokens,
+            "accumulated_total_cost": self.accumulated_total_cost,
         }
 
-
-    async def forward(
-        self, synapse: PromptingSynapse
-    ) -> PromptingSynapse:
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
         """
         Processes the incoming synapse by performing a predefined operation on the input data.
         This method should be replaced with actual logic relevant to the miner's purpose.
@@ -99,11 +104,11 @@ async def forward(
             with get_openai_callback() as cb:
                 t0 = time.time()
                 bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
-                            
+
                 message = synapse.messages[-1]
-                
+
                 bt.logging.debug(f"💬 Querying openai and wikipedia: {message}")
-                
+
                 response = self.agent.run(message)
 
                 synapse.completion = response
@@ -111,11 +116,11 @@ async def forward(
 
                 if self.config.wandb.on:
                     self.log_event(
-                        timing=synapse_latency, 
+                        timing=synapse_latency,
                         prompt=message,
                         completion=response,
-                        system_prompt='',
-                        extra_info=self.get_cost_logging(cb)
+                        system_prompt="",
+                        extra_info=self.get_cost_logging(cb),
                     )
 
             bt.logging.debug(f"✅ Served Response: {response}")
@@ -126,4 +131,4 @@ async def forward(
         finally:
             if self.config.neuron.stop_on_forward_exception:
                 self.should_exit = True
-            return synapse
\ No newline at end of file
+            return synapse

From 9669cab83f2044fd1230643f512ec2e5194c6919 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Wed, 31 Jan 2024 21:57:59 +0000
Subject: [PATCH 09/34] properly adds miner integration with system prompt args

---
 prompting/miners/hf_miner.py     | 2 +-
 prompting/miners/openai_miner.py | 2 +-
 prompting/utils/config.py        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/prompting/miners/hf_miner.py b/prompting/miners/hf_miner.py
index c7ae016d..f4540352 100644
--- a/prompting/miners/hf_miner.py
+++ b/prompting/miners/hf_miner.py
@@ -83,7 +83,7 @@ def __init__(self, config=None):
         )
 
         self.model_id = self.config.neuron.model_id
-        self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
+        self.system_prompt = self.config.neuron.system_prompt
 
     async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
         """
diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
index aa789b52..c885e142 100644
--- a/prompting/miners/openai_miner.py
+++ b/prompting/miners/openai_miner.py
@@ -65,7 +65,7 @@ def __init__(self, config=None):
             temperature=self.config.neuron.temperature,
         )
 
-        self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
+        self.system_prompt = self.config.neuron.system_prompt
         self.accumulated_total_tokens = 0
         self.accumulated_prompt_tokens = 0
         self.accumulated_completion_tokens = 0
diff --git a/prompting/utils/config.py b/prompting/utils/config.py
index 009b7197..48e6c52c 100644
--- a/prompting/utils/config.py
+++ b/prompting/utils/config.py
@@ -178,7 +178,7 @@ def add_miner_args(cls, parser):
         "--neuron.system_prompt",
         type=str,
         help="The system prompt to use for the miner.",
-        default="You are a helpful AI assistant. You answer questions, summarize documents, and debug code. You are always straight to the point and honest.",
+        default="You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
     )
 
     parser.add_argument(

From dde38408e0122454e1115a6aac019ad9f33e88cd Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Thu, 1 Feb 2024 20:33:02 +0000
Subject: [PATCH 10/34] adds check verification on global logger definition

---
 prompting/utils/config.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/prompting/utils/config.py b/prompting/utils/config.py
index 48e6c52c..0abb1875 100644
--- a/prompting/utils/config.py
+++ b/prompting/utils/config.py
@@ -40,7 +40,8 @@ def check_config(cls, config: "bt.Config"):
     if not os.path.exists(config.neuron.full_path):
         os.makedirs(config.neuron.full_path, exist_ok=True)
 
-    if not config.neuron.dont_save_events:
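+    # Loguru raises an error if the same level is registered twice, so only add "EVENTS" when missing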
+    log_level_exists = "EVENTS" in logger._core.levels
+    if not config.neuron.dont_save_events and not log_level_exists:
         # Add custom event logger for the events.
         logger.level("EVENTS", no=38, icon="📝")
         logger.add(

From 9b5b91fc7124c80a62e8e219ef2edb0cf128ca6e Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Thu, 1 Feb 2024 20:45:08 +0000
Subject: [PATCH 11/34] clean main func from hf miner

---
 prompting/miners/hf_miner.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/prompting/miners/hf_miner.py b/prompting/miners/hf_miner.py
index f4540352..2e3c1e50 100644
--- a/prompting/miners/hf_miner.py
+++ b/prompting/miners/hf_miner.py
@@ -144,14 +144,3 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
                 self.should_exit = True
             return synapse
 
-
-# This is the main function, which runs the miner.
-if __name__ == "__main__":
-    with HuggingFaceMiner() as miner:
-        while True:
-            bt.logging.info("Miner running...", time.time())
-            time.sleep(5)
-
-            if miner.should_exit:
-                bt.logging.warning("Ending miner...")
-                break

From a8e3bc888bb45d440d2758b4c1d7e5faa48c02be Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Mon, 5 Feb 2024 18:55:57 +0000
Subject: [PATCH 12/34] refactor agent code + adds react agent

---
 .../miners/{wiki_agent => agent}/README.md    |  0
 neurons/miners/{wiki_agent => agent}/miner.py |  4 +-
 prompting/miners/__init__.py                  |  2 +-
 .../{wiki_agent_miner.py => agent_miner.py}   | 32 ++++++++++---
 prompting/miners/agents/__init__.py           |  4 ++
 prompting/miners/agents/base_agent.py         |  5 +++
 prompting/miners/agents/react_agent.py        | 45 +++++++++++++++++++
 .../single_action_agent.py}                   | 35 +++++++++------
 prompting/miners/agents/utils.py              | 38 ++++++++++++++++
 9 files changed, 144 insertions(+), 21 deletions(-)
 rename neurons/miners/{wiki_agent => agent}/README.md (100%)
 rename neurons/miners/{wiki_agent => agent}/miner.py (94%)
 rename prompting/miners/{wiki_agent_miner.py => agent_miner.py} (85%)
 create mode 100644 prompting/miners/agents/__init__.py
 create mode 100644 prompting/miners/agents/base_agent.py
 create mode 100644 prompting/miners/agents/react_agent.py
 rename prompting/miners/{wiki_agent.py => agents/single_action_agent.py} (81%)
 create mode 100644 prompting/miners/agents/utils.py

diff --git a/neurons/miners/wiki_agent/README.md b/neurons/miners/agent/README.md
similarity index 100%
rename from neurons/miners/wiki_agent/README.md
rename to neurons/miners/agent/README.md
diff --git a/neurons/miners/wiki_agent/miner.py b/neurons/miners/agent/miner.py
similarity index 94%
rename from neurons/miners/wiki_agent/miner.py
rename to neurons/miners/agent/miner.py
index e9bfda27..267a8688 100644
--- a/neurons/miners/wiki_agent/miner.py
+++ b/neurons/miners/agent/miner.py
@@ -16,11 +16,11 @@
 # DEALINGS IN THE SOFTWARE.
 import time
 import bittensor as bt
-from prompting.miners import WikipediaAgentMiner
+from prompting.miners import AgentMiner
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
-    with WikipediaAgentMiner() as miner:
+    with AgentMiner() as miner:
         while True:
             bt.logging.info("Miner running...", time.time())
             time.sleep(5)
diff --git a/prompting/miners/__init__.py b/prompting/miners/__init__.py
index ec76d2d9..bcf45b7e 100644
--- a/prompting/miners/__init__.py
+++ b/prompting/miners/__init__.py
@@ -6,4 +6,4 @@
 # Real miners
 from .hf_miner import HuggingFaceMiner
 from .openai_miner import OpenAIMiner
-from .wiki_agent_miner import WikipediaAgentMiner
+from .agent_miner import AgentMiner
diff --git a/prompting/miners/wiki_agent_miner.py b/prompting/miners/agent_miner.py
similarity index 85%
rename from prompting/miners/wiki_agent_miner.py
rename to prompting/miners/agent_miner.py
index 95f61196..080980aa 100644
--- a/prompting/miners/wiki_agent_miner.py
+++ b/prompting/miners/agent_miner.py
@@ -25,11 +25,11 @@
 # import base miner class which takes care of most of the boilerplate
 from prompting.base.prompting_miner import BasePromptingMiner
 from dotenv import load_dotenv, find_dotenv
-from prompting.miners.wiki_agent import WikiAgent
+from prompting.miners.agents import SingleActionAgent, ReactAgent
 from langchain.callbacks import get_openai_callback
 
 
-class WikipediaAgentMiner(BasePromptingMiner):
+class AgentMiner(BasePromptingMiner):
     """Langchain-based miner which uses OpenAI's API as the LLM. This uses the ReAct framework.
 
     You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
@@ -42,6 +42,13 @@ def add_args(cls, parser: argparse.ArgumentParser):
         """
         super().add_args(parser)
 
+        parser.add_argument(
+            "--use_react_agent",
+            action="store_true",
+            default=False,
+            help="Flag to enable the ReAct agent",
+        )
+
     def __init__(self, config=None):
         super().__init__(config=config)
 
@@ -56,9 +63,24 @@ def __init__(self, config=None):
 
         _ = load_dotenv(find_dotenv())
 
-        self.agent = WikiAgent(
-            self.config.neuron.model_id, self.config.neuron.temperature
-        )
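+        # Pick the agent implementation: ReAct (multi-step tool use) or the default single-action agent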
+        if self.config.use_react_agent:
+            self.agent = ReactAgent(
+                self.config.neuron.model_id,
+                self.config.neuron.temperature,
+                self.config.neuron.max_new_tokens,
+                self.config.neuron.load_in_8bit,
+                self.config.neuron.load_in_4bit,
+            )
+        else:
+            self.agent = SingleActionAgent(
+                self.config.neuron.model_id,
+                self.config.neuron.temperature,
+                self.config.neuron.max_new_tokens,
+                self.config.neuron.load_in_8bit,
+                self.config.neuron.load_in_4bit,
+            )
+
         self.accumulated_total_tokens = 0
         self.accumulated_prompt_tokens = 0
         self.accumulated_completion_tokens = 0
diff --git a/prompting/miners/agents/__init__.py b/prompting/miners/agents/__init__.py
new file mode 100644
index 00000000..f3a1d772
--- /dev/null
+++ b/prompting/miners/agents/__init__.py
@@ -0,0 +1,4 @@
+from .base_agent import BaseAgent
+from .single_action_agent import SingleActionAgent
+from .react_agent import ReactAgent
+from .utils import get_tools, load_hf_llm
\ No newline at end of file
diff --git a/prompting/miners/agents/base_agent.py b/prompting/miners/agents/base_agent.py
new file mode 100644
index 00000000..94f38763
--- /dev/null
+++ b/prompting/miners/agents/base_agent.py
@@ -0,0 +1,5 @@
+from abc import ABC
+
+class BaseAgent(ABC):
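+    """Minimal agent interface; concrete agents implement run()."""
+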
+    def run(self, input: str) -> str:
+        pass
\ No newline at end of file
diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
new file mode 100644
index 00000000..d9f23777
--- /dev/null
+++ b/prompting/miners/agents/react_agent.py
@@ -0,0 +1,45 @@
+import bittensor as bt
+from prompting.miners.agents import get_tools, load_hf_llm, BaseAgent
+from langchain.agents import AgentExecutor, create_react_agent
+from langchain import hub
+from langchain import OpenAI
+
+
+class ReactAgent(BaseAgent):
+    def __init__(self,
+            model_id: str, 
+            model_temperature: float,            
+            max_new_tokens: int = 1024,
+            load_in_8bits: bool = False,
+            load_in_4bits: bool = False
+    ):
+        tools = get_tools()
+
+        bt.logging.info(f"""Initializing ReACT agent with follow parameters:
+        - model_temperature: {model_temperature}
+        - max_new_tokens: {max_new_tokens}
+        - load_in_8bits: {load_in_8bits}
+        - load_in_4bits: {load_in_4bits}"""
+        )
+
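+        # Pull the stock ReAct prompt template from the LangChain Hub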
+        prompt = hub.pull("hwchase17/react")
+
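+        # Heuristic: model ids containing "gpt" are routed to OpenAI; anything else loads a local HF pipeline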
+        if 'gpt' not in model_id:            
+            llm = load_hf_llm(model_id, max_new_tokens, load_in_8bits, load_in_4bits)
+        else:
+            llm = OpenAI(model_name=model_id, temperature=model_temperature)
+
+
+        # Choose the LLM to use
+        llm = OpenAI(openai_api_key='sk-***REDACTED***')
+
+        # Construct the ReAct agent
+        agent = create_react_agent(llm, tools, prompt)
+
+        # Create an agent executor by passing in the agent and tools
+        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)    
+
+
+    def run(self, input: str) -> str:
+        return self.agent_executor.run(input)
+
diff --git a/prompting/miners/wiki_agent.py b/prompting/miners/agents/single_action_agent.py
similarity index 81%
rename from prompting/miners/wiki_agent.py
rename to prompting/miners/agents/single_action_agent.py
index 5ea1485b..f0fea241 100644
--- a/prompting/miners/wiki_agent.py
+++ b/prompting/miners/agents/single_action_agent.py
@@ -9,6 +9,8 @@
 from langchain.schema import AgentAction, AgentFinish, OutputParserException
 import re
 import bittensor as bt
+from prompting.miners.agents.utils import get_tools, load_hf_llm
+from prompting.miners.agents.base_agent import BaseAgent
 from typing import Union
 from typing import List
 from langchain.prompts import StringPromptTemplate
@@ -93,16 +95,15 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
         )
 
 
-class WikiAgent:
-    def __init__(self, model_id: str, model_temperature: float):
-        self.wikipedia = WikipediaAPIWrapper()
-        tools = [
-            Tool(
-                name="wikipedia",
-                func=self.wikipedia.run,
-                description="Useful for when you need to look up a topic, country or person on wikipedia",
-            )
-        ]
+class SingleActionAgent(BaseAgent):
+    def __init__(self,
+                model_id: str, 
+                model_temperature: float,            
+                max_new_tokens: int = 1024,
+                load_in_8bits: bool = False,
+                load_in_4bits: bool = False
+        ):
+        tools = get_tools()
 
         prompt = CustomPromptTemplate(
             template=template,
@@ -112,10 +113,18 @@ def __init__(self, model_id: str, model_temperature: float):
             input_variables=["input", "intermediate_steps"],
         )
 
-        bt.logging.info(
-            f"Initializing agent with model_id: {model_id} and model_temperature: {model_temperature}"
+        bt.logging.info(f"""Initializing single action agent with follow parameters:
+        - model_id: {model_id} 
+        - model_temperature: {model_temperature}
+        - max_new_tokens: {max_new_tokens}
+        - load_in_8bits: {load_in_8bits}
+        - load_in_4bits: {load_in_4bits}"""
         )
-        llm = OpenAI(model_name=model_id, temperature=model_temperature)
+
+        if 'gpt' not in model_id:            
+            llm = load_hf_llm(model_id, max_new_tokens, load_in_8bits, load_in_4bits)
+        else:
+            llm = OpenAI(model_name=model_id, temperature=model_temperature)
 
         llm_chain = LLMChain(llm=llm, prompt=prompt)
         output_parser = CustomOutputParser()
diff --git a/prompting/miners/agents/utils.py b/prompting/miners/agents/utils.py
new file mode 100644
index 00000000..84439858
--- /dev/null
+++ b/prompting/miners/agents/utils.py
@@ -0,0 +1,38 @@
+import torch
+from langchain.utilities import WikipediaAPIWrapper
+from langchain.agents import Tool
+from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+
+
+def get_tools():
+    wikipedia = WikipediaAPIWrapper()
+    tools = [
+        Tool(
+            name="wikipedia",
+            func=wikipedia.run,
+            description="Useful for when you need to look up a topic, country or person on wikipedia",
+        )
+    ]
+
+    return tools
+
+
+def load_hf_llm(model_id: str, max_new_tokens: int, load_in_8bits: bool, load_in_4bits: bool):
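+    """Loads a local HuggingFace text-generation pipeline as a LangChain LLM, optionally quantized to 8 or 4 bits."""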
+    model_kwargs = { "torch_dtype": torch.float16 }
+
+    if load_in_8bits:         
+        model_kwargs["load_in_8bit"] = True
+    elif load_in_4bits:
+        model_kwargs["load_in_4bit"] = True
+
+    
+    llm = HuggingFacePipeline.from_model_id(
+        model_id=model_id,
+        task="text-generation",        
+        device_map="auto",        
+        pipeline_kwargs={"max_new_tokens": max_new_tokens},
+        model_kwargs=model_kwargs
+    )
+
+    return llm
+

From 369820b3c3ee45337c883eab3eeadf04302deb63 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Mon, 5 Feb 2024 20:44:35 +0000
Subject: [PATCH 13/34] agents adjustments

---
 prompting/miners/agents/__init__.py           |  3 +--
 prompting/miners/agents/react_agent.py        | 26 ++++++++++++-------
 .../miners/agents/single_action_agent.py      | 18 ++++++++-----
 prompting/miners/agents/utils.py              | 15 -----------
 requirements.txt                              |  3 ++-
 5 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/prompting/miners/agents/__init__.py b/prompting/miners/agents/__init__.py
index f3a1d772..449d62a6 100644
--- a/prompting/miners/agents/__init__.py
+++ b/prompting/miners/agents/__init__.py
@@ -1,4 +1,3 @@
 from .base_agent import BaseAgent
 from .single_action_agent import SingleActionAgent
-from .react_agent import ReactAgent
-from .utils import get_tools, load_hf_llm
\ No newline at end of file
+from .react_agent import ReactAgent
\ No newline at end of file
diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
index d9f23777..bd5ff87d 100644
--- a/prompting/miners/agents/react_agent.py
+++ b/prompting/miners/agents/react_agent.py
@@ -1,8 +1,12 @@
 import bittensor as bt
-from prompting.miners.agents import get_tools, load_hf_llm, BaseAgent
+from prompting.miners.agents.utils import load_hf_llm
+from prompting.miners.agents.base_agent import BaseAgent
 from langchain.agents import AgentExecutor, create_react_agent
 from langchain import hub
-from langchain import OpenAI
+from langchain.chat_models import ChatOpenAI
+from langchain.utilities import WikipediaAPIWrapper
+from langchain.agents import Tool
+from langchain.tools import WikipediaQueryRun
 
 
 class ReactAgent(BaseAgent):
@@ -13,7 +17,14 @@ def __init__(self,
             load_in_8bits: bool = False,
             load_in_4bits: bool = False
     ):
-        tools = get_tools()
+        self.wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
+        tools = [
+            Tool(
+                name="Wikipedia",
+                func=self.wikipedia.run,
+                description="Useful for when you need to look up a topic, country or person on wikipedia",
+            )
+        ]
 
         bt.logging.info(f"""Initializing ReACT agent with the following parameters:
         - model_temperature: {model_temperature}
@@ -27,11 +38,7 @@ def __init__(self,
         if 'gpt' not in model_id:            
             llm = load_hf_llm(model_id, max_new_tokens, load_in_8bits, load_in_4bits)
         else:
-            llm = OpenAI(model_name=model_id, temperature=model_temperature)
-
-
-        # Choose the LLM to use
-        llm = OpenAI(openai_api_key='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')
+            llm = ChatOpenAI(model_name=model_id, temperature=model_temperature)        
 
         # Construct the ReAct agent
         agent = create_react_agent(llm, tools, prompt)
@@ -41,5 +48,6 @@ def __init__(self,
 
 
     def run(self, input: str) -> str:
-        return self.agent_executor.run(input)
+        response = self.agent_executor.invoke({"input": input})['output']
+        return response
 
diff --git a/prompting/miners/agents/single_action_agent.py b/prompting/miners/agents/single_action_agent.py
index f0fea241..bd7d63c6 100644
--- a/prompting/miners/agents/single_action_agent.py
+++ b/prompting/miners/agents/single_action_agent.py
@@ -9,14 +9,13 @@
 from langchain.schema import AgentAction, AgentFinish, OutputParserException
 import re
 import bittensor as bt
-from prompting.miners.agents.utils import get_tools, load_hf_llm
+from prompting.miners.agents.utils import load_hf_llm
 from prompting.miners.agents.base_agent import BaseAgent
 from typing import Union
 from typing import List
 from langchain.prompts import StringPromptTemplate
-from langchain import OpenAI
+from langchain.chat_models import ChatOpenAI
 from langchain.agents import Tool
-from langchain.agents import initialize_agent
 from langchain.chains import LLMChain
 from langchain.agents import (
     Tool,
@@ -24,7 +23,7 @@
     LLMSingleActionAgent,
     AgentOutputParser,
 )
-
+from langchain.tools import WikipediaQueryRun
 
 # Set up the base template
 template = """Answer the following questions as best you can. You have access to the following tools:
@@ -103,7 +102,14 @@ def __init__(self,
                 load_in_8bits: bool = False,
                 load_in_4bits: bool = False
         ):
-        tools = get_tools()
+        self.wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
+        tools = [
+            Tool(
+                name="Wikipedia",
+                func=self.wikipedia.run,
+                description="Useful for when you need to look up a topic, country or person on wikipedia",
+            )
+        ]
 
         prompt = CustomPromptTemplate(
             template=template,
@@ -124,7 +130,7 @@ def __init__(self,
         if 'gpt' not in model_id:            
             llm = load_hf_llm(model_id, max_new_tokens, load_in_8bits, load_in_4bits)
         else:
-            llm = OpenAI(model_name=model_id, temperature=model_temperature)
+            llm = ChatOpenAI(model_name=model_id, temperature=model_temperature)
 
         llm_chain = LLMChain(llm=llm, prompt=prompt)
         output_parser = CustomOutputParser()
diff --git a/prompting/miners/agents/utils.py b/prompting/miners/agents/utils.py
index 84439858..ec40f941 100644
--- a/prompting/miners/agents/utils.py
+++ b/prompting/miners/agents/utils.py
@@ -1,22 +1,7 @@
 import torch
-from langchain.utilities import WikipediaAPIWrapper
-from langchain.agents import Tool
 from langchain.llms.huggingface_pipeline import HuggingFacePipeline
 
 
-def get_tools():
-    wikipedia = WikipediaAPIWrapper()
-    tools = [
-        Tool(
-            name="wikipedia",
-            func=wikipedia.run,
-            description="Useful for when you need to look up a topic, country or person on wikipedia",
-        )
-    ]
-
-    return tools
-
-
 def load_hf_llm(model_id:str, max_new_tokens:int, load_in_8bits: bool ,load_in_4bits: bool):
     model_kwargs = { "torch_dtype": torch.float16 }
 
diff --git a/requirements.txt b/requirements.txt
index 516b018e..065a6e21 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,5 +17,6 @@ tenacity
 antlr4-python3-runtime==4.11
 wikipedia
 openai==1.9.0
-langchain==0.1.0
+langchain==0.1.5
+langchainhub==0.1.14
 python-dotenv
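
The routing rule shared by both agents is a plain substring check on the
model id. A minimal sketch of the branch, assuming langchain==0.1.5 as
pinned above and an OPENAI_API_KEY in the environment for the hosted path
(argument values are illustrative):

    from langchain.chat_models import ChatOpenAI
    from prompting.miners.agents.utils import load_hf_llm

    def build_llm(model_id: str, temperature: float, max_new_tokens: int):
        # Anything without "gpt" in its id is served locally through a
        # transformers text-generation pipeline; otherwise use OpenAI.
        if "gpt" not in model_id:
            return load_hf_llm(model_id, max_new_tokens, False, False)
        return ChatOpenAI(model_name=model_id, temperature=temperature)

    llm = build_llm("gpt-3.5-turbo", 0.7, 256)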

From 53ca5032e4c87b7341a9ad2b814be58b2e903af6 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Mon, 5 Feb 2024 21:33:35 +0000
Subject: [PATCH 14/34] adds max iteration to agents

---
 prompting/miners/agents/react_agent.py         | 2 +-
 prompting/miners/agents/single_action_agent.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
index bd5ff87d..1dcfdebe 100644
--- a/prompting/miners/agents/react_agent.py
+++ b/prompting/miners/agents/react_agent.py
@@ -44,7 +44,7 @@ def __init__(self,
         agent = create_react_agent(llm, tools, prompt)
 
         # Create an agent executor by passing in the agent and tools
-        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)    
+        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=5)
 
 
     def run(self, input: str) -> str:
diff --git a/prompting/miners/agents/single_action_agent.py b/prompting/miners/agents/single_action_agent.py
index bd7d63c6..e5786fcf 100644
--- a/prompting/miners/agents/single_action_agent.py
+++ b/prompting/miners/agents/single_action_agent.py
@@ -142,8 +142,8 @@ def __init__(self,
             allowed_tools=tools,
         )
 
-        self.agent_executor = AgentExecutor.from_agent_and_tools(
-            agent=agent, tools=tools, verbose=True, handle_parsing_errors=True
+        self.agent_executor = AgentExecutor(
+            agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=5
         )
 
     def run(self, input: str) -> str:
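
Capping max_iterations bounds the agent's think/act loop: after five steps
the executor stops early instead of spinning indefinitely on a question it
cannot parse or answer. A usage sketch, assuming `agent` and `tools` are
constructed as in the patches above:

    from langchain.agents import AgentExecutor

    executor = AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
        max_iterations=5,  # stop early rather than loop forever
    )
    result = executor.invoke({"input": "Who wrote The Old Man and the Sea?"})
    print(result["output"])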

From cc2b5c9ec721b3173cf0a0e7c8c93418142e62da Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Mon, 5 Feb 2024 21:34:17 +0000
Subject: [PATCH 15/34] fix load llm issue

---
 prompting/miners/agents/utils.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/prompting/miners/agents/utils.py b/prompting/miners/agents/utils.py
index ec40f941..b7cf9c6b 100644
--- a/prompting/miners/agents/utils.py
+++ b/prompting/miners/agents/utils.py
@@ -13,8 +13,9 @@ def load_hf_llm(model_id:str, max_new_tokens:int, load_in_8bits: bool ,load_in_4
     
     llm = HuggingFacePipeline.from_model_id(
         model_id=model_id,
-        task="text-generation",        
-        device_map="auto",        
+        task="text-generation",    
+        # TODO: Add device from config dynamically    
+        device=0,
         pipeline_kwargs={"max_new_tokens": max_new_tokens},
         model_kwargs=model_kwargs
     )
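
Note the trade-off in this fix: device=0 pins the pipeline to the first
CUDA GPU, whereas the previous device_map="auto" delegated placement to
accelerate and cannot be combined with an explicit device. A sketch of the
pinned variant in isolation (model id illustrative):

    from langchain.llms.huggingface_pipeline import HuggingFacePipeline

    llm = HuggingFacePipeline.from_model_id(
        model_id="HuggingFaceH4/zephyr-7b-beta",
        task="text-generation",
        device=0,  # first CUDA GPU; the TODO above would read this from config
        pipeline_kwargs={"max_new_tokens": 256},
    )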

From ce160ca55ba9b90ccb58cd006dd67523473be7ad Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Mon, 5 Feb 2024 22:20:54 +0000
Subject: [PATCH 16/34] fix max tokens param

---
 prompting/miners/agent_miner.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/prompting/miners/agent_miner.py b/prompting/miners/agent_miner.py
index 080980aa..c0539ca6 100644
--- a/prompting/miners/agent_miner.py
+++ b/prompting/miners/agent_miner.py
@@ -67,7 +67,7 @@ def __init__(self, config=None):
             self.agent = ReactAgent(
                 self.config.neuron.model_id, 
                 self.config.neuron.temperature,
-                self.config.neuron.max_new_tokens,
+                self.config.neuron.max_tokens,
                 self.config.neuron.load_in_8bits,
                 self.config.neuron.load_in_4bits
             )
@@ -75,7 +75,7 @@ def __init__(self, config=None):
             self.agent = SingleActionAgent(
                 self.config.neuron.model_id, 
                 self.config.neuron.temperature,
-                self.config.neuron.max_new_tokens,
+                self.config.neuron.max_tokens,
                 self.config.neuron.load_in_8bits,
                 self.config.neuron.load_in_4bits
             )

From b8dd71b883bf1b8e943b9dd0703326dca5c1f369 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Tue, 6 Feb 2024 19:34:58 +0000
Subject: [PATCH 17/34] adds toolminer for experiments

---
 prompting/miners/__init__.py   |   1 +
 prompting/miners/tool_miner.py | 103 +++++++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+)
 create mode 100644 prompting/miners/tool_miner.py

diff --git a/prompting/miners/__init__.py b/prompting/miners/__init__.py
index bcf45b7e..1d2889ee 100644
--- a/prompting/miners/__init__.py
+++ b/prompting/miners/__init__.py
@@ -7,3 +7,4 @@
 from .hf_miner import HuggingFaceMiner
 from .openai_miner import OpenAIMiner
 from .agent_miner import AgentMiner
+from .tool_miner import ToolMiner
\ No newline at end of file
diff --git a/prompting/miners/tool_miner.py b/prompting/miners/tool_miner.py
new file mode 100644
index 00000000..623bc742
--- /dev/null
+++ b/prompting/miners/tool_miner.py
@@ -0,0 +1,103 @@
+import os
+import typing
+import argparse
+import bittensor as bt
+import wikipedia
+import time
+# Bittensor Miner Template:
+from prompting.protocol import PromptingSynapse
+
+# import base miner class which takes care of most of the boilerplate
+from prompting.base.prompting_miner import BasePromptingMiner
+from langchain.chat_models import ChatOpenAI
+from dotenv import load_dotenv, find_dotenv
+from langchain.callbacks import get_openai_callback
+from langchain.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+
+
+class ToolMiner(BasePromptingMiner):    
+    @classmethod
+    def add_args(cls, parser: argparse.ArgumentParser):
+        super().add_args(parser)            
+
+
+    def __init__(self, config=None):
+        super().__init__(config=config)
+
+        bt.logging.info(f"Initializing with model {self.config.neuron.model_id}...")
+
+        if self.config.wandb.on:
+            self.identity_tags = ("openai_miner",) + (self.config.neuron.model_id,)
+
+        _ = load_dotenv(find_dotenv())
+        api_key = os.environ.get("OPENAI_API_KEY")
+
+        # Set openai key and other args
+        self.model = ChatOpenAI(
+            api_key=api_key,
+            model_name=self.config.neuron.model_id,
+            max_tokens=self.config.neuron.max_tokens,
+            temperature=self.config.neuron.temperature,
+        )
+
+        self.system_prompt = """You are a nice AI assistant that uses the provided context to answer user queries.
+        ## Context
+        {context}
+        """
+
+    async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
+        try:
+            with get_openai_callback() as cb:
+                t0 = time.time()
+                bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+
+                role = synapse.roles[-1]
+                message = synapse.messages[-1]
+
+                matches = wikipedia.search(message)
+                title = matches[0]
+                page = wikipedia.page(title)
+                context = page.content
+
+                if len(context) > 12_000:
+                    context = context[:12_000]
+
+                formatted_system_prompt = self.system_prompt.format(context=context)
+
+                prompt = ChatPromptTemplate.from_messages(
+                    [("system", formatted_system_prompt), ("user", "{input}")]
+                )
+                chain = prompt | self.model | StrOutputParser()
+                
+
+                bt.logging.debug(f"💬 Querying openai: {prompt}")
+                response = chain.invoke({"role": role, "input": message})
+
+                synapse.completion = response
+                synapse_latency = time.time() - t0
+
+                if self.config.wandb.on:
+                    self.log_event(
+                        timing=synapse_latency,
+                        prompt=message,
+                        completion=response,
+                        system_prompt=self.system_prompt,
+                        extra_info=self.get_cost_logging(cb),
+                    )
+
+            bt.logging.debug(f"✅ Served Response: {response}")
+            return synapse
+        except Exception as e:
+            bt.logging.error(f"Error in forward: {e}")
+            synapse.completion = "Error: " + str(e)
+        finally:
+            if self.config.neuron.stop_on_forward_exception:
+                self.should_exit = True
+            return synapse
+      
+    async def blacklist(self, synapse: PromptingSynapse) -> typing.Tuple[bool, str]:
+        return False, "All good here"
+
+    async def priority(self, synapse: PromptingSynapse) -> float:
+        return 1e6
\ No newline at end of file
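
Stripped of the miner scaffolding, ToolMiner's forward pass is a
retrieve-then-answer chain: search Wikipedia with the incoming message, cap
the page text at 12,000 characters, and ground the model on it. A minimal
sketch that passes the context as a template variable rather than
pre-formatting the system prompt as the patch does (question and model id
are illustrative, error handling omitted, OPENAI_API_KEY assumed set):

    import wikipedia
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser

    question = "When did the French Revolution start?"
    matches = wikipedia.search(question)
    context = wikipedia.page(matches[0]).content[:12_000]

    prompt = ChatPromptTemplate.from_messages(
        [("system", "Answer using the context below.\n## Context\n{context}"),
         ("user", "{input}")]
    )
    chain = prompt | ChatOpenAI(model_name="gpt-3.5-turbo") | StrOutputParser()
    print(chain.invoke({"context": context, "input": question}))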

From df7638b5e2924c29fee8b4f61d5b823b0b128e71 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Wed, 7 Feb 2024 15:58:00 +0000
Subject: [PATCH 18/34] fix error case for non wiki match

---
 prompting/miners/tool_miner.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/prompting/miners/tool_miner.py b/prompting/miners/tool_miner.py
index 623bc742..b975834b 100644
--- a/prompting/miners/tool_miner.py
+++ b/prompting/miners/tool_miner.py
@@ -14,6 +14,7 @@
 from langchain.callbacks import get_openai_callback
 from langchain.prompts import ChatPromptTemplate
 from langchain_core.output_parsers import StrOutputParser
+from traceback import print_exception
 
 
 class ToolMiner(BasePromptingMiner):    
@@ -56,14 +57,19 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
                 message = synapse.messages[-1]
 
                 matches = wikipedia.search(message)
-                title = matches[0]
-                page = wikipedia.page(title)
-                context = page.content
 
-                if len(context) > 12_000:
-                    context = context[:12_000]
+                # If we find a match, we add the context to the system prompt
+                if len(matches) > 0:
+                    title = matches[0]
+                    page = wikipedia.page(title)
+                    context = page.content
 
-                formatted_system_prompt = self.system_prompt.format(context=context)
+                    if len(context) > 12_000:
+                        context = context[:12_000]
+
+                    formatted_system_prompt = self.system_prompt.format(context=context)
+                else:
+                    formatted_system_prompt = self.config.neuron.system_prompt
 
                 prompt = ChatPromptTemplate.from_messages(
                     [("system", formatted_system_prompt), ("user", "{input}")]
@@ -90,6 +96,7 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
             return synapse
         except Exception as e:
             bt.logging.error(f"Error in forward: {e}")
+            bt.logging.error(print_exception(value=e))
             synapse.completion = "Error: " + str(e)
         finally:
             if self.config.neuron.stop_on_forward_exception:

From 8dc340d1dccb54f394e731382c521a8afac05ee7 Mon Sep 17 00:00:00 2001
From: p-ferreira <pe_drojunior@hotmail.com>
Date: Thu, 8 Feb 2024 22:20:49 +0000
Subject: [PATCH 19/34] increase wiki retries and fix tool_miner bug

---
 prompting/miners/tool_miner.py | 3 ++-
 prompting/tools/dataset.py     | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/prompting/miners/tool_miner.py b/prompting/miners/tool_miner.py
index b975834b..04008f93 100644
--- a/prompting/miners/tool_miner.py
+++ b/prompting/miners/tool_miner.py
@@ -56,7 +56,8 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
                 role = synapse.roles[-1]
                 message = synapse.messages[-1]
 
-                matches = wikipedia.search(message)
+                # Message needs to be limited to 300 characters for wikipedia search, otherwise it will return an error
+                matches = wikipedia.search(message[:300])
 
                 # If we find a match, we add the context to the system prompt
                 if len(matches) > 0:
diff --git a/prompting/tools/dataset.py b/prompting/tools/dataset.py
index d46ebad1..945cd153 100644
--- a/prompting/tools/dataset.py
+++ b/prompting/tools/dataset.py
@@ -335,7 +335,7 @@ def next(self):
 
 
 class DateQADataset:
-    def __init__(self, max_tries: int = 10, seed=None):
+    def __init__(self, max_tries: int = 50, seed=None):
         self.max_tries = max_tries
         self.seed = seed
         self.rng = random.Random(seed)
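
The truncation in tool_miner.py matters because, as the new comment notes,
the Wikipedia search endpoint rejects queries longer than roughly 300
characters, so forwarding a full validator message unmodified raises an
error. A guard in the same spirit:

    import wikipedia

    def safe_wiki_search(query: str, limit: int = 300) -> list:
        # Truncate before searching, mirroring the fix above.
        return wikipedia.search(query[:limit])

    print(safe_wiki_search("When did the French Revolution start?"))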

From 443f831373708f8d057a6003541c9fc82b47150e Mon Sep 17 00:00:00 2001
From: mccrindlebrian <mccrinbc@gmail.com>
Date: Wed, 14 Feb 2024 10:15:23 -0800
Subject: [PATCH 20/34] black

---
 prompting/.DS_Store                           | Bin 0 -> 8196 bytes
 prompting/agent.py                            |  10 +--
 prompting/base/miner.py                       |  12 ++--
 prompting/base/prompting_miner.py             |  59 +++++++++++-------
 prompting/base/validator.py                   |  24 +++----
 prompting/cleaners/cleaner.py                 |   4 +-
 prompting/dendrite.py                         |  10 +--
 prompting/llm.py                              |  17 +++--
 prompting/miners/__init__.py                  |   2 +-
 prompting/miners/agent_miner.py               |   9 ++-
 prompting/miners/agents/__init__.py           |   2 +-
 prompting/miners/agents/base_agent.py         |   3 +-
 prompting/miners/agents/react_agent.py        |  32 ++++++----
 .../miners/agents/single_action_agent.py      |  26 +++++---
 prompting/miners/agents/utils.py              |  16 ++---
 prompting/miners/hf_miner.py                  |   1 -
 prompting/miners/tool_miner.py                |  11 ++--
 prompting/mock.py                             |  22 ++++---
 prompting/rewards/code_diff.py                |  10 +--
 prompting/rewards/date.py                     |  35 ++++++++---
 prompting/rewards/float_diff.py               |  18 +++---
 prompting/rewards/pipeline.py                 |  36 ++++++-----
 prompting/rewards/relevance.py                |   4 +-
 prompting/rewards/reward.py                   |  58 +++++++++--------
 prompting/rewards/rouge.py                    |   4 +-
 prompting/tasks/date_qa.py                    |   7 +--
 prompting/tasks/debugging.py                  |  27 +++-----
 prompting/tasks/generic_instruction.py        |   8 +--
 prompting/tasks/math.py                       |  32 +++++-----
 prompting/tasks/qa.py                         |  10 +--
 prompting/tasks/summarization.py              |   9 +--
 prompting/tasks/task.py                       |   3 +-
 prompting/utils/config.py                     |  25 ++++----
 prompting/utils/logging.py                    |   4 +-
 prompting/utils/uids.py                       |  22 ++++---
 35 files changed, 291 insertions(+), 281 deletions(-)
 create mode 100644 prompting/.DS_Store

diff --git a/prompting/.DS_Store b/prompting/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..d31696ad021d79b0cc8d779e05478967f796cf25
GIT binary patch

diff --git a/prompting/agent.py b/prompting/agent.py
index 14473151..4aba4a94 100644
--- a/prompting/agent.py
+++ b/prompting/agent.py
@@ -85,12 +85,12 @@ def create_challenge(self) -> str:
         t0 = time.time()
 
         cleaner = None
-        if hasattr(self.task, 'cleaning_pipeline'):
-            cleaner = CleanerPipeline(
-                cleaning_pipeline=self.task.cleaning_pipeline
-            )
+        if hasattr(self.task, "cleaning_pipeline"):
+            cleaner = CleanerPipeline(cleaning_pipeline=self.task.cleaning_pipeline)
 
-        self.challenge = super().query(message="Ask a question related to your goal", cleaner=cleaner)
+        self.challenge = super().query(
+            message="Ask a question related to your goal", cleaner=cleaner
+        )
         self.challenge = self.task.format_challenge(self.challenge)
         self.challenge_time = time.time() - t0
 
diff --git a/prompting/base/miner.py b/prompting/base/miner.py
index fbc9d6c5..07fc6347 100644
--- a/prompting/base/miner.py
+++ b/prompting/base/miner.py
@@ -30,11 +30,11 @@ class BaseMinerNeuron(BaseNeuron):
     """
     Base class for Bittensor miners.
     """
+
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
-        super().add_args(parser)  
-        add_miner_args(cls, parser)    
-
+        super().add_args(parser)
+        add_miner_args(cls, parser)
 
     def __init__(self, config=None):
         super().__init__(config=config)
@@ -59,7 +59,7 @@ def __init__(self, config=None):
             blacklist_fn=self.blacklist,
             priority_fn=self.priority,
         )
-        bt.logging.info(f"Axon created: {self.axon}")      
+        bt.logging.info(f"Axon created: {self.axon}")
 
         # Instantiate runners
         self.should_exit: bool = False
@@ -209,9 +209,7 @@ def set_weights(self):
             )
 
         except Exception as e:
-            bt.logging.error(
-                f"Failed to set weights on chain with exception: { e }"
-            )
+            bt.logging.error(f"Failed to set weights on chain with exception: { e }")
 
         bt.logging.info(f"Set weights: {chain_weights}")
 
diff --git a/prompting/base/prompting_miner.py b/prompting/base/prompting_miner.py
index e5abe691..2e1a123f 100644
--- a/prompting/base/prompting_miner.py
+++ b/prompting/base/prompting_miner.py
@@ -18,13 +18,16 @@
 import time
 import typing
 import bittensor as bt
+
 # Bittensor Miner Template:
 import prompting
 from prompting.protocol import PromptingSynapse
+
 # import base miner class which takes care of most of the boilerplate
 from prompting.base.miner import BaseMinerNeuron
 from datetime import datetime
 
+
 class BasePromptingMiner(BaseMinerNeuron):
     """
     Your miner neuron class. You should use this class to define your miner's behavior. In particular, you should replace the forward function with your own logic. You may also want to override the blacklist and priority functions according to your needs.
@@ -35,13 +38,10 @@ class BasePromptingMiner(BaseMinerNeuron):
     """
 
     def __init__(self, config=None):
-        super().__init__(config=config)                
+        super().__init__(config=config)
         self.identity_tags = None
-         
 
-    async def blacklist(
-        self, synapse: PromptingSynapse
-    ) -> typing.Tuple[bool, str]:
+    async def blacklist(self, synapse: PromptingSynapse) -> typing.Tuple[bool, str]:
         """
         Determines whether an incoming request should be blacklisted and thus ignored. Your implementation should
         define the logic for blacklisting requests based on your needs and desired security parameters.
@@ -113,34 +113,38 @@ async def priority(self, synapse: PromptingSynapse) -> float:
             f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority
         )
         return prirority
-    
+
     def init_wandb(self):
         bt.logging.info("Initializing wandb...")
-        
+
         uid = f"uid_{self.metagraph.hotkeys.index(self.wallet.hotkey.ss58_address)}"
         net_uid = f"netuid_{self.config.netuid}"
         tags = [
-            self.wallet.hotkey.ss58_address, 
-            net_uid, 
+            self.wallet.hotkey.ss58_address,
+            net_uid,
             f"uid_{uid}",
             prompting.__version__,
             str(prompting.__spec_version__),
         ]
-        
+
         run_name = None
         if self.identity_tags:
             # Add identity tags to run tags
-            tags += self.identity_tags     
+            tags += self.identity_tags
+
+            # Create run name from identity tags
+            run_name_tags = [str(tag) for tag in self.identity_tags]
 
-            # Create run name from identity tags       
-            run_name_tags = [str(tag) for tag in self.identity_tags]            
-            
             # Add uid, netuid and timestamp to run name
-            run_name_tags += [uid, net_uid, datetime.now().strftime('%Y_%m_%d_%H_%M_%S')]
+            run_name_tags += [
+                uid,
+                net_uid,
+                datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
+            ]
 
             # Compose run name
-            run_name = '_'.join(run_name_tags)                
-                    
+            run_name = "_".join(run_name_tags)
+
         # inits wandb in case it hasn't been inited yet
         self.wandb_run = wandb.init(
             name=run_name,
@@ -148,13 +152,20 @@ def init_wandb(self):
             entity=self.config.wandb.entity,
             config=self.config,
             mode="online" if self.config.wandb.on else "offline",
-            tags=tags,                
+            tags=tags,
         )
-    
-    def log_event(self, timing: float, prompt: str, completion: str, system_prompt: str, extra_info: dict = {}):        
+
+    def log_event(
+        self,
+        timing: float,
+        prompt: str,
+        completion: str,
+        system_prompt: str,
+        extra_info: dict = {},
+    ):
         if not getattr(self, "wandb_run", None):
             self.init_wandb()
-        
+
         step_log = {
             "epoch_time": timing,
             # "block": self.last_epoch_block,
@@ -167,8 +178,8 @@ def log_event(self, timing: float, prompt: str, completion: str, system_prompt:
             "incentive": self.metagraph.I[self.uid].item(),
             "consensus": self.metagraph.C[self.uid].item(),
             "dividends": self.metagraph.D[self.uid].item(),
-            **extra_info
+            **extra_info,
         }
 
-        bt.logging.info('Logging event to wandb...', step_log)
-        wandb.log(step_log)
\ No newline at end of file
+        bt.logging.info("Logging event to wandb...", step_log)
+        wandb.log(step_log)
diff --git a/prompting/base/validator.py b/prompting/base/validator.py
index 1f129272..9cdb78b5 100644
--- a/prompting/base/validator.py
+++ b/prompting/base/validator.py
@@ -35,12 +35,11 @@ class BaseValidatorNeuron(BaseNeuron):
     """
     Base class for Bittensor validators. Your validator should inherit from this class.
     """
-    
+
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
         super().add_args(parser)
-        add_validator_args(cls, parser)    
- 
+        add_validator_args(cls, parser)
 
     def __init__(self, config=None):
         super().__init__(config=config)
@@ -58,7 +57,9 @@ def __init__(self, config=None):
 
         # Set up initial scoring weights for validation
         bt.logging.info("Building validation weights.")
-        self.scores = torch.zeros(self.metagraph.n, dtype=torch.float32, device=self.device)
+        self.scores = torch.zeros(
+            self.metagraph.n, dtype=torch.float32, device=self.device
+        )
 
         # Init sync with the network. Updates the metagraph.
         self.sync()
@@ -94,14 +95,11 @@ def serve_axon(self):
                 bt.logging.error(f"Failed to serve Axon with exception: {e}")
 
         except Exception as e:
-            bt.logging.error(
-                f"Failed to create Axon initialize with exception: {e}"
-            )
+            bt.logging.error(f"Failed to create Axon initialize with exception: {e}")
 
     async def concurrent_forward(self):
         coroutines = [
-            self.forward()
-            for _ in range(self.config.neuron.num_concurrent_forwards)
+            self.forward() for _ in range(self.config.neuron.num_concurrent_forwards)
         ]
         await asyncio.gather(*coroutines)
 
@@ -161,8 +159,8 @@ def run(self):
         except Exception as err:
             bt.logging.error("Error during validation", str(err))
             bt.logging.debug(print_exception(type(err), err, err.__traceback__))
-            self.should_exit = True        
-    
+            self.should_exit = True
+
     def run_in_background_thread(self):
         """
         Starts the validator's operations in a background thread upon entering the context.
@@ -224,9 +222,7 @@ def set_weights(self):
 
         # Calculate the average reward for each uid across non-zero values.
         # Replace any NaN values with 0.
-        raw_weights = torch.nn.functional.normalize(
-            self.scores, p=1, dim=0
-        )
+        raw_weights = torch.nn.functional.normalize(self.scores, p=1, dim=0)
 
         bt.logging.debug("raw_weights", raw_weights)
         bt.logging.debug("raw_weight_uids", self.metagraph.uids.to("cpu"))
diff --git a/prompting/cleaners/cleaner.py b/prompting/cleaners/cleaner.py
index bae75a7b..5d7e4b3a 100644
--- a/prompting/cleaners/cleaner.py
+++ b/prompting/cleaners/cleaner.py
@@ -49,5 +49,7 @@ def apply(self, generation: str) -> str:
             return generation
 
         except Exception as E:
-            bt.logging.error(f"Failed to apply cleaning pipeline {cleaner['name']}. {E},")
+            bt.logging.error(
+                f"Failed to apply cleaning pipeline {cleaner['name']}. {E},"
+            )
             return generation
diff --git a/prompting/dendrite.py b/prompting/dendrite.py
index 6e146850..1f592c2e 100644
--- a/prompting/dendrite.py
+++ b/prompting/dendrite.py
@@ -5,18 +5,13 @@
 
 class DendriteResponseEvent:
     def __init__(self, responses: List[bt.Synapse], uids: torch.LongTensor):
-
         self.uids = uids
         self.completions = [synapse.completion for synapse in responses]
-        self.timings = [
-            synapse.dendrite.process_time or 0 for synapse in responses
-        ]
+        self.timings = [synapse.dendrite.process_time or 0 for synapse in responses]
         self.status_messages = [
             synapse.dendrite.status_message for synapse in responses
         ]
-        self.status_codes = [
-            synapse.dendrite.status_code for synapse in responses
-        ]
+        self.status_codes = [synapse.dendrite.status_code for synapse in responses]
 
     def __state_dict__(self):
         return {
@@ -29,4 +24,3 @@ def __state_dict__(self):
 
     def __repr__(self):
         return f"DendriteResponseEvent(uids={self.uids}, completions={self.completions}, timings={self.timings}, status_messages={self.status_messages}, status_codes={self.status_codes})"
-
diff --git a/prompting/llm.py b/prompting/llm.py
index 5b5a560e..bd7afd81 100644
--- a/prompting/llm.py
+++ b/prompting/llm.py
@@ -26,7 +26,7 @@
 from prompting.cleaners.cleaner import CleanerPipeline
 
 
-def load_pipeline(model_id, device=None, mock=False, model_kwargs:dict = None):
+def load_pipeline(model_id, device=None, mock=False, model_kwargs: dict = None):
     """Loads the HuggingFace pipeline for the LLM, or a mock pipeline if mock=True"""
 
     if mock or model_id == "mock":
@@ -34,16 +34,13 @@ def load_pipeline(model_id, device=None, mock=False, model_kwargs:dict = None):
 
     if not device.startswith("cuda"):
         bt.logging.warning("Only crazy people run this on CPU. It is not recommended.")
-    
+
     # Sets default model torch type in case is not defined
     if model_kwargs is None:
         model_kwargs = dict(torch_dtype=torch.bfloat16)
-    
+
     llm_pipeline = pipeline(
-        "text-generation",
-        model=model_id,
-        device_map=device,
-        model_kwargs=model_kwargs
+        "text-generation", model=model_id, device_map=device, model_kwargs=model_kwargs
     )
 
     return llm_pipeline
@@ -88,10 +85,12 @@ def query(
         tbeg = time.time()
         response = self.forward(messages=messages)
 
-        if cleaner is not None:            
+        if cleaner is not None:
             clean_response = cleaner.apply(generation=response)
             if clean_response != response:
-                bt.logging.debug(f"Response cleaned, chars removed: {len(response) - len(clean_response)}...")
+                bt.logging.debug(
+                    f"Response cleaned, chars removed: {len(response) - len(clean_response)}..."
+                )
             response = clean_response
 
         self.messages = messages + [{"content": response, "role": "assistant"}]
diff --git a/prompting/miners/__init__.py b/prompting/miners/__init__.py
index 1d2889ee..65c7d06c 100644
--- a/prompting/miners/__init__.py
+++ b/prompting/miners/__init__.py
@@ -7,4 +7,4 @@
 from .hf_miner import HuggingFaceMiner
 from .openai_miner import OpenAIMiner
 from .agent_miner import AgentMiner
-from .tool_miner import ToolMiner
\ No newline at end of file
+from .tool_miner import ToolMiner
diff --git a/prompting/miners/agent_miner.py b/prompting/miners/agent_miner.py
index c0539ca6..0b223aeb 100644
--- a/prompting/miners/agent_miner.py
+++ b/prompting/miners/agent_miner.py
@@ -65,22 +65,21 @@ def __init__(self, config=None):
 
         if self.config.use_react_agent:
             self.agent = ReactAgent(
-                self.config.neuron.model_id, 
+                self.config.neuron.model_id,
                 self.config.neuron.temperature,
                 self.config.neuron.max_tokens,
                 self.config.neuron.load_in_8bits,
-                self.config.neuron.load_in_4bits
+                self.config.neuron.load_in_4bits,
             )
         else:
             self.agent = SingleActionAgent(
-                self.config.neuron.model_id, 
+                self.config.neuron.model_id,
                 self.config.neuron.temperature,
                 self.config.neuron.max_tokens,
                 self.config.neuron.load_in_8bits,
-                self.config.neuron.load_in_4bits
+                self.config.neuron.load_in_4bits,
             )
 
-
         self.accumulated_total_tokens = 0
         self.accumulated_prompt_tokens = 0
         self.accumulated_completion_tokens = 0
diff --git a/prompting/miners/agents/__init__.py b/prompting/miners/agents/__init__.py
index 449d62a6..63cdcde5 100644
--- a/prompting/miners/agents/__init__.py
+++ b/prompting/miners/agents/__init__.py
@@ -1,3 +1,3 @@
 from .base_agent import BaseAgent
 from .single_action_agent import SingleActionAgent
-from .react_agent import ReactAgent
\ No newline at end of file
+from .react_agent import ReactAgent
diff --git a/prompting/miners/agents/base_agent.py b/prompting/miners/agents/base_agent.py
index 94f38763..62e546bf 100644
--- a/prompting/miners/agents/base_agent.py
+++ b/prompting/miners/agents/base_agent.py
@@ -1,5 +1,6 @@
 from abc import ABC
 
+
 class BaseAgent(ABC):
     def run(self, input: str) -> str:
-        pass
\ No newline at end of file
+        pass
diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
index 1dcfdebe..c6e7358c 100644
--- a/prompting/miners/agents/react_agent.py
+++ b/prompting/miners/agents/react_agent.py
@@ -10,12 +10,13 @@
 
 
 class ReactAgent(BaseAgent):
-    def __init__(self,
-            model_id: str, 
-            model_temperature: float,            
-            max_new_tokens: int = 1024,
-            load_in_8bits: bool = False,
-            load_in_4bits: bool = False
+    def __init__(
+        self,
+        model_id: str,
+        model_temperature: float,
+        max_new_tokens: int = 1024,
+        load_in_8bits: bool = False,
+        load_in_4bits: bool = False,
     ):
         self.wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
         tools = [
@@ -26,7 +27,8 @@ def __init__(self,
             )
         ]
 
-        bt.logging.info(f"""Initializing ReACT agent with the following parameters:
+        bt.logging.info(
+            f"""Initializing ReACT agent with the following parameters:
         - model_temperature: {model_temperature}
         - max_new_tokens: {max_new_tokens}
         - load_in_8bits: {load_in_8bits}
@@ -35,19 +37,23 @@ def __init__(self,
 
         prompt = hub.pull("hwchase17/react")
 
-        if 'gpt' not in model_id:            
+        if "gpt" not in model_id:
             llm = load_hf_llm(model_id, max_new_tokens, load_in_8bits, load_in_4bits)
         else:
-            llm = ChatOpenAI(model_name=model_id, temperature=model_temperature)        
+            llm = ChatOpenAI(model_name=model_id, temperature=model_temperature)
 
         # Construct the ReAct agent
         agent = create_react_agent(llm, tools, prompt)
 
         # Create an agent executor by passing in the agent and tools
-        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=5)
-
+        self.agent_executor = AgentExecutor(
+            agent=agent,
+            tools=tools,
+            verbose=True,
+            handle_parsing_errors=True,
+            max_iterations=5,
+        )
 
     def run(self, input: str) -> str:
-        response = self.agent_executor.invoke({"input": input})['output']
+        response = self.agent_executor.invoke({"input": input})["output"]
         return response
-
diff --git a/prompting/miners/agents/single_action_agent.py b/prompting/miners/agents/single_action_agent.py
index e5786fcf..825f7565 100644
--- a/prompting/miners/agents/single_action_agent.py
+++ b/prompting/miners/agents/single_action_agent.py
@@ -95,13 +95,14 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
 
 
 class SingleActionAgent(BaseAgent):
-    def __init__(self,
-                model_id: str, 
-                model_temperature: float,            
-                max_new_tokens: int = 1024,
-                load_in_8bits: bool = False,
-                load_in_4bits: bool = False
-        ):
+    def __init__(
+        self,
+        model_id: str,
+        model_temperature: float,
+        max_new_tokens: int = 1024,
+        load_in_8bits: bool = False,
+        load_in_4bits: bool = False,
+    ):
         self.wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
         tools = [
             Tool(
@@ -119,7 +120,8 @@ def __init__(self,
             input_variables=["input", "intermediate_steps"],
         )
 
-        bt.logging.info(f"""Initializing single action agent with the following parameters:
+        bt.logging.info(
+            f"""Initializing single action agent with the following parameters:
         - model_id: {model_id} 
         - model_temperature: {model_temperature}
         - max_new_tokens: {max_new_tokens}
@@ -127,7 +129,7 @@ def __init__(self,
         - load_in_4bits: {load_in_4bits}"""
         )
 
-        if 'gpt' not in model_id:            
+        if "gpt" not in model_id:
             llm = load_hf_llm(model_id, max_new_tokens, load_in_8bits, load_in_4bits)
         else:
             llm = ChatOpenAI(model_name=model_id, temperature=model_temperature)
@@ -143,7 +145,11 @@ def __init__(self,
         )
 
         self.agent_executor = AgentExecutor(
-            agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=5
+            agent=agent,
+            tools=tools,
+            verbose=True,
+            handle_parsing_errors=True,
+            max_iterations=5,
         )
 
     def run(self, input: str) -> str:
diff --git a/prompting/miners/agents/utils.py b/prompting/miners/agents/utils.py
index b7cf9c6b..aa6fc754 100644
--- a/prompting/miners/agents/utils.py
+++ b/prompting/miners/agents/utils.py
@@ -2,23 +2,23 @@
 from langchain.llms.huggingface_pipeline import HuggingFacePipeline
 
 
-def load_hf_llm(model_id:str, max_new_tokens:int, load_in_8bits: bool ,load_in_4bits: bool):
-    model_kwargs = { "torch_dtype": torch.float16 }
+def load_hf_llm(
+    model_id: str, max_new_tokens: int, load_in_8bits: bool, load_in_4bits: bool
+):
+    model_kwargs = {"torch_dtype": torch.float16}
 
-    if load_in_8bits:         
+    if load_in_8bits:
         model_kwargs["load_in_8bit"] = True
     elif load_in_4bits:
         model_kwargs["load_in_4bit"] = True
 
-    
     llm = HuggingFacePipeline.from_model_id(
         model_id=model_id,
-        task="text-generation",    
-        # TODO: Add device from config dynamically    
+        task="text-generation",
+        # TODO: Add device from config dynamically
         device=0,
         pipeline_kwargs={"max_new_tokens": max_new_tokens},
-        model_kwargs=model_kwargs
+        model_kwargs=model_kwargs,
     )
 
     return llm
-
diff --git a/prompting/miners/hf_miner.py b/prompting/miners/hf_miner.py
index 2e3c1e50..41d1d637 100644
--- a/prompting/miners/hf_miner.py
+++ b/prompting/miners/hf_miner.py
@@ -143,4 +143,3 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
             if self.config.neuron.stop_on_forward_exception:
                 self.should_exit = True
             return synapse
-
diff --git a/prompting/miners/tool_miner.py b/prompting/miners/tool_miner.py
index 04008f93..321a97ad 100644
--- a/prompting/miners/tool_miner.py
+++ b/prompting/miners/tool_miner.py
@@ -4,6 +4,7 @@
 import bittensor as bt
 import wikipedia
 import time
+
 # Bittensor Miner Template:
 from prompting.protocol import PromptingSynapse
 
@@ -17,11 +18,10 @@
 from traceback import print_exception
 
 
-class ToolMiner(BasePromptingMiner):    
+class ToolMiner(BasePromptingMiner):
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):
-        super().add_args(parser)            
-
+        super().add_args(parser)
 
     def __init__(self, config=None):
         super().__init__(config=config)
@@ -76,7 +76,6 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
                     [("system", formatted_system_prompt), ("user", "{input}")]
                 )
                 chain = prompt | self.model | StrOutputParser()
-                
 
                 bt.logging.debug(f"💬 Querying openai: {prompt}")
                 response = chain.invoke({"role": role, "input": message})
@@ -103,9 +102,9 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
             if self.config.neuron.stop_on_forward_exception:
                 self.should_exit = True
             return synapse
-      
+
     async def blacklist(self, synapse: PromptingSynapse) -> typing.Tuple[bool, str]:
         return False, "All good here"
 
     async def priority(self, synapse: PromptingSynapse) -> float:
-        return 1e6
\ No newline at end of file
+        return 1e6
diff --git a/prompting/mock.py b/prompting/mock.py
index e5862c27..725876fd 100644
--- a/prompting/mock.py
+++ b/prompting/mock.py
@@ -7,6 +7,7 @@
 
 from typing import List
 
+
 class MockTokenizer:
     def __init__(self):
         super().__init__()
@@ -64,9 +65,9 @@ def forward(self, messages, **kwargs):
         return self.postprocess(output)
 
     def postprocess(self, output, **kwargs):
-        output = output.split(
-            self.model.tokenizer.role_expr.format(role="assistant")
-        )[-1].strip()
+        output = output.split(self.model.tokenizer.role_expr.format(role="assistant"))[
+            -1
+        ].strip()
         return [{"generated_text": output}]
 
     def preprocess(self, **kwargs):
@@ -103,9 +104,7 @@ def __init__(self, netuid, n=16, wallet=None, network="mock"):
 
 class MockMetagraph(bt.metagraph):
     def __init__(self, netuid=1, network="mock", subtensor=None):
-        super().__init__(
-            netuid=netuid, network=network, sync=False
-        )
+        super().__init__(netuid=netuid, network=network, sync=False)
 
         if subtensor is not None:
             self.subtensor = subtensor
@@ -123,6 +122,7 @@ class MockDendrite(bt.dendrite):
     """
     Replaces a real bittensor network request with a mock request that just returns some static completion for all axons that are passed and adds some random delay.
     """
+
     def __init__(self, wallet):
         super().__init__(wallet)
 
@@ -135,7 +135,6 @@ async def forward(
         run_async: bool = True,
         streaming: bool = False,
     ):
-
         if streaming:
             raise NotImplementedError("Streaming not implemented yet.")
 
@@ -154,7 +153,7 @@ async def single_axon_response(i, axon):
                 if process_time < timeout:
                     s.dendrite.process_time = str(time.time() - start_time)
                     # Update the status code and status message of the dendrite to match the axon
-                    s.completion = f'Mock miner completion {i}'
+                    s.completion = f"Mock miner completion {i}"
                     s.dendrite.status_code = 200
                     s.dendrite.status_message = "OK"
                     synapse.dendrite.process_time = str(process_time)
@@ -171,7 +170,10 @@ async def single_axon_response(i, axon):
                     return s
 
             return await asyncio.gather(
-                *(single_axon_response(i, target_axon) for i, target_axon in enumerate(axons))
+                *(
+                    single_axon_response(i, target_axon)
+                    for i, target_axon in enumerate(axons)
+                )
             )
 
         return await query_all_axons(streaming)
@@ -183,4 +185,4 @@ def __str__(self) -> str:
         Returns:
             str: The string representation of the Dendrite object in the format "dendrite(<user_wallet_address>)".
         """
-        return "MockDendrite({})".format(self.keypair.ss58_address)
\ No newline at end of file
+        return "MockDendrite({})".format(self.keypair.ss58_address)
diff --git a/prompting/rewards/code_diff.py b/prompting/rewards/code_diff.py
index 1549ee97..35661790 100644
--- a/prompting/rewards/code_diff.py
+++ b/prompting/rewards/code_diff.py
@@ -21,17 +21,13 @@ def __init__(self, lines=False, threshold=None, **kwargs):
 
     def unified_diff(self, reference, completion):
         return len(
-            difflib.unified_diff(
-                reference.splitlines(), completion.splitlines()
-            )
+            difflib.unified_diff(reference.splitlines(), completion.splitlines())
         )
 
     def seq_match(self, reference, completion):
         return difflib.SequenceMatcher(None, reference, completion).ratio()
 
-    def reward(
-        self, reference: str, completions: List[str]
-    ) -> BatchRewardOutput:
+    def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
         """Get the score between two strings.
         lines: If True, return a unified diff. If False, return a ratio.
         """
@@ -52,7 +48,7 @@ def reward(
 
         output = BatchRewardOutput(
             rewards=torch.FloatTensor(rewards),
-            timings=torch.FloatTensor(timings),            
+            timings=torch.FloatTensor(timings),
             extra_info={"threshold": self.threshold, "lines": self.lines},
         )
 
diff --git a/prompting/rewards/date.py b/prompting/rewards/date.py
index 119f7040..7264ce15 100644
--- a/prompting/rewards/date.py
+++ b/prompting/rewards/date.py
@@ -7,7 +7,7 @@
 class DateRewardModel(BaseRewardModel):
     @property
     def name(self) -> str:
-        return 'date'
+        return "date"
 
     def __init__(self, **kwargs):
         super().__init__()
@@ -15,25 +15,38 @@ def __init__(self, **kwargs):
     def date_score(self, reference, completion):
         # TODO: cleanup code
         score = 1
-        #Take the last 4 characters of the reference as the year
+        # Take the last 4 characters of the reference as the year
         year = reference[-4:]
         month = reference.split()[0].strip()
         month_num = str(time.strptime(month, "%B").tm_mon)
-        day = reference.split()[1].strip(',')
+        day = reference.split()[1].strip(",")
         number_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
         not_in_month_day_year = set(str(month_num) + str(day) + str(year))
         numbers = [str(x) for x in number_list if str(x) not in not_in_month_day_year]
         # Create a list of the months
-        month_list = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
+        month_list = [
+            "January",
+            "February",
+            "March",
+            "April",
+            "May",
+            "June",
+            "July",
+            "August",
+            "September",
+            "October",
+            "November",
+            "December",
+        ]
         months = [x for x in month_list if x not in month]
-        
+
         if not year in completion:
             score -= 0.5
         if not (month_num in completion or month in completion):
             score -= 0.25
         if not day in completion:
             score -= 0.25
-            
+
         if not score == 0:
             # Check if numbers are in completion
             for number in numbers:
@@ -52,13 +65,15 @@ def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
 
         for completion in completions:
             t0 = time.time()
-            reward = self.date_score(reference, completion) 
+            reward = self.date_score(reference, completion)
             timings.append(time.time() - t0)
             rewards.append(reward)
 
         output = BatchRewardOutput(
-            rewards = torch.FloatTensor(rewards),
-            timings = torch.FloatTensor(timings),
-            extra_info = {'type': 'date', },
+            rewards=torch.FloatTensor(rewards),
+            timings=torch.FloatTensor(timings),
+            extra_info={
+                "type": "date",
+            },
         )
         return output
diff --git a/prompting/rewards/float_diff.py b/prompting/rewards/float_diff.py
index a57d076b..f32e082a 100644
--- a/prompting/rewards/float_diff.py
+++ b/prompting/rewards/float_diff.py
@@ -8,7 +8,7 @@
 class FloatDiffModel(BaseRewardModel):
     @property
     def name(self) -> str:
-        return 'float_diff'
+        return "float_diff"
 
     def __init__(self, **kwargs):
         super().__init__()
@@ -19,7 +19,7 @@ def extract_number(text):
         for word in text.split()[::-1]:
             try:
                 # Convert the string to a float
-                return parse_expr(word.replace('$', ''))
+                return parse_expr(word.replace("$", ""))
             except Exception:
                 continue
 
@@ -33,14 +33,13 @@ def math_score(reference, completion):
             return 0.0
 
         try:
-
             # Convert reference to float (this is okay because we already checked that the reference is a float)
             # TODO: More flexible parsing of the reference (just as with the completion)
             ref = float(reference)
             if pred == ref:
-                return 1.0            
+                return 1.0
             # Compute the difference
-            diff = abs(ref - pred)/(ref + 1e-6)
+            diff = abs(ref - pred) / (ref + 1e-6)
             # Make sure the difference is between 0 and 1
             diff = min(abs(diff), 1)
 
@@ -48,7 +47,6 @@ def math_score(reference, completion):
         except Exception:
             return 0.0
 
-
     def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
         """Compute difference scores given a completion and reference pair."""
         rewards = []
@@ -61,8 +59,10 @@ def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
             rewards.append(reward)
 
         output = BatchRewardOutput(
-            rewards = torch.FloatTensor(rewards),
-            timings = torch.FloatTensor(timings),
-            extra_info = {'type': 'math', },
+            rewards=torch.FloatTensor(rewards),
+            timings=torch.FloatTensor(timings),
+            extra_info={
+                "type": "math",
+            },
         )
         return output
diff --git a/prompting/rewards/pipeline.py b/prompting/rewards/pipeline.py
index b8cc0d7d..271b88f1 100644
--- a/prompting/rewards/pipeline.py
+++ b/prompting/rewards/pipeline.py
@@ -30,12 +30,11 @@
     "rouge": RougeRewardModel,
     "relevance": RelevanceRewardModel,
     "diff": DiffRewardModel,
-    'float_diff': FloatDiffModel,
-    'date': DateRewardModel,
+    "float_diff": FloatDiffModel,
+    "date": DateRewardModel,
 }
 
 
-
 class RewardPipeline:
     def __init__(self, selected_tasks: List[str], device):
         self.selected_tasks = selected_tasks
@@ -50,10 +49,9 @@ def get(self, __key: str) -> BaseRewardModel:
         return self.reward_models.get(__key)
 
     def __repr__(self):
-        return f'RewardPipeline({self.reward_models})'
+        return f"RewardPipeline({self.reward_models})"
 
     def validate_tasks(self):
-        
         for task in self.selected_tasks:
             if task not in SUPPORTED_TASKS:
                 raise ValueError(
@@ -64,35 +62,42 @@ def validate_tasks(self):
             self._check_weights(task, "penalty_definition")
 
     def _check_weights(self, task, definition):
-
         total_weight = 0
 
         model_infos = getattr(SUPPORTED_TASKS[task], definition)
-        
+
         for model_info in model_infos:
-            
             if not isinstance(model_info, dict):
-                raise ValueError(f"{definition} model {model_info} is not a dictionary.")
+                raise ValueError(
+                    f"{definition} model {model_info} is not a dictionary."
+                )
             if "weight" not in model_info:
-                raise ValueError(f"{definition} model {model_info} does not have a weight.")
+                raise ValueError(
+                    f"{definition} model {model_info} does not have a weight."
+                )
 
             weight = model_info["weight"]
             if not isinstance(weight, float):
-                raise ValueError(f"{definition} model {model_info} weight is not a float.")
+                raise ValueError(
+                    f"{definition} model {model_info} weight is not a float."
+                )
             if not 0 <= weight <= 1:
-                raise ValueError(f"{definition} model {model_info} weight is not between 0 and 1.")
+                raise ValueError(
+                    f"{definition} model {model_info} weight is not between 0 and 1."
+                )
 
             total_weight += weight
 
         if model_infos and total_weight != 1:
-            raise ValueError(f"{definition} model {model_infos} weights do not sum to 1 (sum={total_weight})")
+            raise ValueError(
+                f"{definition} model {model_infos} weights do not sum to 1 (sum={total_weight})"
+            )
 
     def load_pipeline(self):
         """Dynamically loads the reward models required by the selected tasks so that we only use the necessary resources."""
         active_reward_models = []
 
         for task in self.selected_tasks:
-
             active_reward_models += SUPPORTED_TASKS[task].reward_definition
             active_reward_models += SUPPORTED_TASKS[task].penalty_definition
 
@@ -106,7 +111,7 @@ def load_pipeline(self):
                 raise ValueError(
                     f"Reward model {name} not supported. Please choose from {REWARD_MODELS.keys()}"
                 )
-            elif name in reward_models: # Prevents duplicate reward models
+            elif name in reward_models:  # Prevents duplicate reward models
                 continue
 
             cls = REWARD_MODELS[name]
@@ -115,4 +120,3 @@ def load_pipeline(self):
             reward_models[name] = cls(device=self.device, **params)
 
         self.reward_models = reward_models
-
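
As an illustration of the contract these checks enforce, here is a minimal sketch using the `qa.py` reward definition (reformatted later in this patch) as sample data; the assertions mirror the ValueErrors raised above:

```python
# Minimal sketch of the weight contract enforced by _check_weights,
# using the qa.py reward_definition as sample data.
reward_definition = [
    dict(name="rouge", ngram="rouge-1", metric="f", weight=0.5),
    dict(name="relevance", threshold=None, weight=0.5),
]

for model_info in reward_definition:
    assert isinstance(model_info, dict)          # must be a dictionary
    assert "weight" in model_info                # must carry a weight
    assert isinstance(model_info["weight"], float)
    assert 0 <= model_info["weight"] <= 1        # weight in [0, 1]

# non-empty definitions must have weights that sum to exactly 1
assert sum(m["weight"] for m in reward_definition) == 1
```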
diff --git a/prompting/rewards/relevance.py b/prompting/rewards/relevance.py
index a6f4f694..b754ae33 100644
--- a/prompting/rewards/relevance.py
+++ b/prompting/rewards/relevance.py
@@ -20,13 +20,11 @@ def __init__(self, threshold=None, device=None, pooling_strategy="cls"):
         self.threshold = threshold
         self.model = AnglE.from_pretrained(
             "WhereIsAI/UAE-Large-V1", pooling_strategy=pooling_strategy, device=device
-        )        
+        )
         if device.startswith("cuda"):
             # This line is necessary to pass the model to the device defined at its initialization
             self.model = self.model.cuda()
 
-
-
     def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
         """Calculates the cosine similarity between sentence embeddings of the reference and completions.
         We subtract a baseline score which is what an empty string would get (a failed completion). This is usually around 0.35
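
For intuition, a toy sketch of the baseline-subtracted scoring described in this docstring; the embeddings and the 0.35 baseline are illustrative stand-ins, not the AnglE model's actual outputs:

```python
import torch
import torch.nn.functional as F

# Toy sketch: cosine similarity between reference and completion
# embeddings, shifted by roughly what an empty completion would score.
reference_emb = torch.tensor([0.1, 0.9, 0.2])   # illustrative embedding
completion_emb = torch.tensor([0.2, 0.8, 0.3])  # illustrative embedding
baseline = 0.35  # assumed empty-string score, per the docstring
reward = F.cosine_similarity(reference_emb, completion_emb, dim=0) - baseline
```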
diff --git a/prompting/rewards/reward.py b/prompting/rewards/reward.py
index 8d6854f0..e51a7cd7 100644
--- a/prompting/rewards/reward.py
+++ b/prompting/rewards/reward.py
@@ -16,6 +16,7 @@ class RewardModelTypeEnum(Enum):
 @dataclass
 class RewardEvent:
     """Contains rewards for all the responses in a batch"""
+
     model_name: str
     rewards: torch.FloatTensor
     rewards_normalized: torch.FloatTensor
@@ -52,25 +53,26 @@ def __init__(self, reward_pipeline, agent, response_event, device):
         self.task_rewards = agent.task.reward_definition
         self.task_penalties = agent.task.penalty_definition
         self.reward_events = self.reward_responses(
-            reference=agent.task.reference, 
+            reference=agent.task.reference,
             models=self.task_rewards,
-            reward_type=RewardModelTypeEnum.WEIGHTED_REWARD
+            reward_type=RewardModelTypeEnum.WEIGHTED_REWARD,
         )
         self.penalty_events = self.reward_responses(
-            reference=agent.challenge, 
+            reference=agent.challenge,
             models=self.task_penalties,
-            reward_type=RewardModelTypeEnum.PENALTY
+            reward_type=RewardModelTypeEnum.PENALTY,
         )
         self.rewards = self.total_reward()
 
     def __state_dict__(self, full=False):
-
         state = {"rewards": self.rewards.tolist()}
-        for event in self.reward_events+self.penalty_events:
+        for event in self.reward_events + self.penalty_events:
             state.update(event.asdict())
         return state
 
-    def reward_responses(self, reference: str, models: List[dict], reward_type: RewardModelTypeEnum) -> List[RewardEvent]:
+    def reward_responses(
+        self, reference: str, models: List[dict], reward_type: RewardModelTypeEnum
+    ) -> List[RewardEvent]:
         """Calculates the rewards for the responses given the task and returns a RewardEvent for each reward model
         reward_events: List[RewardEvent] = [
             RewardEvent(model_name='rouge', rewards=torch.zeros(50), timings=torch.zeros(50), ...),
@@ -80,7 +82,6 @@ def reward_responses(self, reference: str, models: List[dict], reward_type: Rewa
         reward_events = []
 
         for reward_info in models:
-
             # Select the reward model from preloaded reward model pipeline
             reward_model = self.reward_pipeline.get(reward_info["name"])
             if not reward_model:
@@ -88,7 +89,9 @@ def reward_responses(self, reference: str, models: List[dict], reward_type: Rewa
                     f"Reward model {reward_info['name']} not supported. Please choose from {self.reward_pipeline.keys()}"
                 )
             # Compute the rewards for the responses given the prompt
-            reward_event = reward_model.apply(reference, self.response_event, reward_type=reward_type)
+            reward_event = reward_model.apply(
+                reference, self.response_event, reward_type=reward_type
+            )
             reward_events.append(reward_event)
 
         return reward_events
@@ -98,15 +101,21 @@ def total_reward(self) -> torch.FloatTensor:
 
         # TODO: How would using the Agent as a reward model fit into this flow?
         # Compute the rewards for the responses given the prompt
-        rewards = torch.zeros_like(self.response_event.uids, dtype=torch.float32, device=self.device)
+        rewards = torch.zeros_like(
+            self.response_event.uids, dtype=torch.float32, device=self.device
+        )
 
         for event in self.reward_events:
-             for reward_info in filter(lambda x: x['name'] == event.model_name, self.task_rewards):
+            for reward_info in filter(
+                lambda x: x["name"] == event.model_name, self.task_rewards
+            ):
                 rewards += reward_info["weight"] * event.rewards.to(self.device)
 
         for event in self.penalty_events:
-            for reward_info in filter(lambda x: x['name'] == event.model_name, self.task_penalties):
-                rewards *= (1 - reward_info["weight"] * event.rewards.to(self.device))
+            for reward_info in filter(
+                lambda x: x["name"] == event.model_name, self.task_penalties
+            ):
+                rewards *= 1 - reward_info["weight"] * event.rewards.to(self.device)
 
         return rewards
 
@@ -119,12 +128,16 @@ class BatchRewardOutput:
     rewards: torch.FloatTensor
     timings: torch.FloatTensor
     extra_info: dict
-    
+
     def __post_init__(self):
         if self.rewards.shape != self.timings.shape:
-            raise ValueError(f"rewards.shape {self.rewards.shape} != timings.shape {self.timings.shape}")
-        
-        self.rewards_normalized = (self.rewards-self.rewards.min())/(self.rewards.max()-self.rewards.min()+1e-6)
+            raise ValueError(
+                f"rewards.shape {self.rewards.shape} != timings.shape {self.timings.shape}"
+            )
+
+        self.rewards_normalized = (self.rewards - self.rewards.min()) / (
+            self.rewards.max() - self.rewards.min() + 1e-6
+        )
 
 
 class BaseRewardModel(ABC):
@@ -133,23 +146,17 @@ class BaseRewardModel(ABC):
     def name(self) -> str:
         ...
 
-
     @abstractmethod
     def __init__(self, **kwargs):
         pass
 
     @abstractmethod
-    def reward(
-        self, reference: str, completions: List[str]
-    ) -> BatchRewardOutput:
+    def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
         pass
 
     def apply(self, reference: str, response_event, reward_type) -> RewardEvent:
-
         t0 = time.time()
-        batch_rewards_output = self.reward(
-            reference, response_event.completions
-        )
+        batch_rewards_output = self.reward(reference, response_event.completions)
         batch_rewards_time = time.time() - t0
 
         return RewardEvent(
@@ -162,6 +169,5 @@ def apply(self, reference: str, response_event, reward_type) -> RewardEvent:
             timings=batch_rewards_output.timings,
         )
 
-
     def __repr__(self):
         return f"{self.__class__.__name__}(name={self.name})"
diff --git a/prompting/rewards/rouge.py b/prompting/rewards/rouge.py
index 1c5bc66a..c06d236f 100644
--- a/prompting/rewards/rouge.py
+++ b/prompting/rewards/rouge.py
@@ -28,9 +28,7 @@ def rouge_score(self, reference, completion):
             self.ngram
         ][self.metric]
 
-    def reward(
-        self, reference: str, completions: List[str]
-    ) -> BatchRewardOutput:
+    def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
         """Compute ROUGE scores given a completion and reference pair."""
         rewards = []
         timings = []
diff --git a/prompting/tasks/date_qa.py b/prompting/tasks/date_qa.py
index 3f48c6ae..81fcff1e 100644
--- a/prompting/tasks/date_qa.py
+++ b/prompting/tasks/date_qa.py
@@ -11,8 +11,6 @@ class DateQuestionAnsweringTask(Task):
     penalty_definition = []
 
     def __init__(self, llm_pipeline, context, create_reference=True):
-
-
         self.name = "date-based question answering"
         self.desc = "get help answering a specific date-based question"
         self.goal = "to get the answer to the following date-based question"
@@ -28,9 +26,9 @@ def __init__(self, llm_pipeline, context, create_reference=True):
         year, _, *event = self.context["event"].split()
         event = " ".join(event)
 
-        options = {'Births':' was born ', 'Deaths':' died ', 'Events':' '}
+        options = {"Births": " was born ", "Deaths": " died ", "Events": " "}
 
-        self.query = event.strip(".") + options[section] + 'on what exact date?'
+        self.query = event.strip(".") + options[section] + "on what exact date?"
         self.reference = self.context["date"] + ", " + year.strip()
 
         self.topic = section
@@ -38,4 +36,3 @@ def __init__(self, llm_pipeline, context, create_reference=True):
         self.tags = []
         self.static_reference = True
         self.static_query = True
-
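
A worked example (with a made-up context entry) of how the query and reference above are assembled:

```python
# Made-up context entry illustrating the query/reference construction.
context = {
    "event": "1903 - The Wright brothers make their first flight.",
    "date": "December 17",
}
year, _, *event = context["event"].split()
event = " ".join(event)
options = {"Births": " was born ", "Deaths": " died ", "Events": " "}

query = event.strip(".") + options["Events"] + "on what exact date?"
reference = context["date"] + ", " + year.strip()
# query -> "The Wright brothers make their first flight on what exact date?"
# reference -> "December 17, 1903"
```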
diff --git a/prompting/tasks/debugging.py b/prompting/tasks/debugging.py
index 1ec1a3fb..0783f077 100644
--- a/prompting/tasks/debugging.py
+++ b/prompting/tasks/debugging.py
@@ -68,9 +68,7 @@ def remove(code, n, sep=" ", min_length=1, max_length=10):
             f"Removing the following {len(indices)} chunks: {[chunks[i] for i in indices]} at indices {indices}"
         )
 
-        return sep.join(
-            [chunk for i, chunk in enumerate(chunks) if i not in indices]
-        )
+        return sep.join([chunk for i, chunk in enumerate(chunks) if i not in indices])
 
     def swap(code, sep=" ", min_length=1, max_length=10):
         """Swap two random chunks in the code. Chunks can be characters, words, or lines."""
@@ -109,9 +107,7 @@ def swap(code, sep=" ", min_length=1, max_length=10):
 
     # spread n corruptions across the code
     for i in range(n_remove):
-        code = remove(
-            code, n=1, sep=sep, min_length=min_length, max_length=max_length
-        )
+        code = remove(code, n=1, sep=sep, min_length=min_length, max_length=max_length)
     for i in range(n_swap):
         code = swap(code, sep=sep, min_length=min_length, max_length=max_length)
 
@@ -120,20 +116,15 @@ def swap(code, sep=" ", min_length=1, max_length=10):
 
 def diff(query, reference):
     """Get the diff between two strings."""
-    return "\n".join(
-        difflib.unified_diff(query.splitlines(), reference.splitlines())
-    )
+    return "\n".join(difflib.unified_diff(query.splitlines(), reference.splitlines()))
 
 
 @dataclass
 class DebuggingTask(Task):
-    reward_definition = [
-        dict(name="diff", lines=False, threshold=0.5, weight=1.0)
-    ]
+    reward_definition = [dict(name="diff", lines=False, threshold=0.5, weight=1.0)]
     penalty_definition = []
 
     def __init__(self, llm_pipeline, context, create_reference=True):
-
         self.name = "debugging"
         self.desc = "get help with debugging"
         self.goal = "ask for help fixing the broken piece of code. When asking for help do not adjust the code in any way."
@@ -146,10 +137,10 @@ def __init__(self, llm_pipeline, context, create_reference=True):
         if create_reference:
             self.reference = self.generate_reference()
 
-        self.delimiter="```"
-        self.topic=self.context["repo_name"]
-        self.subtopic=self.context["path"]
-        self.tags=[self.context["language"]]
+        self.delimiter = "```"
+        self.topic = self.context["repo_name"]
+        self.subtopic = self.context["path"]
+        self.tags = [self.context["language"]]
         self.static_reference = True
         self.static_query = True
 
@@ -179,4 +170,4 @@ def generate_reference(self, llm=None):
         return self.context["code"]
 
     def format_challenge(self, challenge):
-        return f'{challenge}\n{self.delimiter}\n{self.query}\n{self.delimiter}'
\ No newline at end of file
+        return f"{challenge}\n{self.delimiter}\n{self.query}\n{self.delimiter}"
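
A minimal sketch of the `diff()` helper above on a hand-made corruption:

```python
import difflib

# The unified diff records the edits needed to turn query into reference.
query = "def add(a, b):\n    return a - b"       # corrupted code
reference = "def add(a, b):\n    return a + b"   # original code
patch = "\n".join(difflib.unified_diff(query.splitlines(), reference.splitlines()))
# patch contains the hunk lines "-    return a - b" and "+    return a + b"
```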
diff --git a/prompting/tasks/generic_instruction.py b/prompting/tasks/generic_instruction.py
index ac883031..b9aec5d2 100644
--- a/prompting/tasks/generic_instruction.py
+++ b/prompting/tasks/generic_instruction.py
@@ -86,15 +86,11 @@ def __init__(self, llm_pipeline):
         )
 
         self.criteria = self.create_criteria(llm_pipeline)
-        instruction, reference = self.create_instruction_and_reference(
-            llm_pipeline
-        )
+        instruction, reference = self.create_instruction_and_reference(llm_pipeline)
         self.challenge = instruction
         self.reference = reference
 
-    def extract_instruction_and_reference_from_text(
-        self, text: str
-    ) -> Tuple[str, str]:
+    def extract_instruction_and_reference_from_text(self, text: str) -> Tuple[str, str]:
         # Split the text into problem and response using regular expression
         split_text = re.split(r"\nResponse:\n", text)
 
diff --git a/prompting/tasks/math.py b/prompting/tasks/math.py
index eac4eb1a..61e244f2 100644
--- a/prompting/tasks/math.py
+++ b/prompting/tasks/math.py
@@ -7,32 +7,30 @@
 @dataclass
 class MathTask(Task):
     reward_definition = [
-        dict(name='float_diff', weight = 1.0),
+        dict(name="float_diff", weight=1.0),
     ]
     penalty_definition = []
 
     def __init__(self, llm_pipeline, context, create_reference=True):
-        
         reference = context["solution"]
-        
+
         try:
             float(reference)
         except:
-            raise ValueError(f"Solution {reference} is not a float.") 
+            raise ValueError(f"Solution {reference} is not a float.")
+
+        self.name = "math"
+        self.desc = "get help solving a math problem"
+        self.goal = "to get the answer to the following math question"
 
-        self.name="math"
-        self.desc="get help solving a math problem"
-        self.goal="to get the answer to the following math question"
-        
         self.context = context
 
         query = "How can I solve, " + context["problem"] + "?"
-        
-        self.query=query
-        self.reference=str(reference)
-        self.topic=context["topic"]
-        self.subtopic=context["subtopic"]
-        self.tags=[]
-        self.static_reference=True
-        self.static_query=True
-        
+
+        self.query = query
+        self.reference = str(reference)
+        self.topic = context["topic"]
+        self.subtopic = context["subtopic"]
+        self.tags = []
+        self.static_reference = True
+        self.static_query = True
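
A minimal sketch of the float guard above, with illustrative values; the bare `except` in the hunk is narrowed to `ValueError` here:

```python
# Made-up context entry; the dataset solution must parse as a float
# before the task is built.
context = {"problem": "What is 7 * 6?", "solution": "42"}
reference = context["solution"]
try:
    float(reference)
except ValueError:
    raise ValueError(f"Solution {reference} is not a float.")
query = "How can I solve, " + context["problem"] + "?"
```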
diff --git a/prompting/tasks/qa.py b/prompting/tasks/qa.py
index 14680142..fec67f94 100644
--- a/prompting/tasks/qa.py
+++ b/prompting/tasks/qa.py
@@ -40,7 +40,6 @@
 
 @dataclass
 class QuestionAnsweringTask(Task):
-    
     reward_definition = [
         dict(name="rouge", ngram="rouge-1", metric="f", weight=0.5),
         dict(name="relevance", threshold=None, weight=0.5),
@@ -50,7 +49,6 @@ class QuestionAnsweringTask(Task):
     ]
 
     def __init__(self, llm_pipeline, context, create_reference=True):
-
         self.name = "question-answering"
         self.desc = "get help on answering a question"
         self.goal = "to get the answer to the following question"
@@ -63,15 +61,12 @@ def __init__(self, llm_pipeline, context, create_reference=True):
         self.context = context
 
         self.query_system_prompt = QUERY_SYSTEM_PROMPT
-        self.query_prompt = QUERY_PROMPT_TEMPLATE.format(
-            context = self.context["text"]
-        )
+        self.query_prompt = QUERY_PROMPT_TEMPLATE.format(context=self.context["text"])
         self.query = self.generate_query(llm_pipeline)
 
-
         self.reference_system_prompt = REFERENCE_SYSTEM_PROMPT
         self.reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(
-            context = self.context["text"], question = self.query
+            context=self.context["text"], question=self.query
         )
         if create_reference:
             self.reference = self.generate_reference(llm_pipeline)
@@ -79,4 +74,3 @@ def __init__(self, llm_pipeline, context, create_reference=True):
         self.topic = self.context["title"]
         self.subtopic = self.context["categories"][0]
         self.tags = self.context["categories"]
-
diff --git a/prompting/tasks/summarization.py b/prompting/tasks/summarization.py
index 70f59c08..75c073cd 100644
--- a/prompting/tasks/summarization.py
+++ b/prompting/tasks/summarization.py
@@ -26,17 +26,13 @@
 
 @dataclass
 class SummarizationTask(Task):
-    
     reward_definition = [
         dict(name="rouge", ngram="rouge-l", metric="f", weight=0.5),
         dict(name="relevance", threshold=None, weight=0.5),
     ]
-    penalty_definition = [
-        dict(name="rouge", ngram="rouge-1", metric="f", weight=1.0)
-    ]
+    penalty_definition = [dict(name="rouge", ngram="rouge-1", metric="f", weight=1.0)]
 
     def __init__(self, llm_pipeline: Pipeline, context: str, create_reference=True):
-
         self.name = "summarization"
         self.desc = "get help with summarization"
         self.goal = "summarize the following topic"
@@ -64,7 +60,7 @@ def __init__(self, llm_pipeline: Pipeline, context: str, create_reference=True):
 
         self.reference_system_prompt = SUMMARIZATION_SYSTEM_PROMPT
         self.reference_prompt = REFERENCE_PROMPT_TEMPLATE.format(
-            context = self.context["text"]
+            context=self.context["text"]
         )
         if create_reference:
             self.reference = self.generate_reference(llm_pipeline)
@@ -73,4 +69,3 @@ def __init__(self, llm_pipeline: Pipeline, context: str, create_reference=True):
         self.subtopic = self.context["categories"][0]
         self.tags = self.context["categories"]
         self.static_query = True
-
diff --git a/prompting/tasks/task.py b/prompting/tasks/task.py
index 263e63e5..0a4e314f 100644
--- a/prompting/tasks/task.py
+++ b/prompting/tasks/task.py
@@ -28,7 +28,7 @@ class Task(ABC):
     subtopic: str
     tags: List[str]
     context: dict
-    reward_definition: List[dict] 
+    reward_definition: List[dict]
     penalty_definition: List[dict] = None
     reward_threshold: float = 0.0
     reference: Union[str, List[str]] = ""
@@ -112,4 +112,3 @@ def generate_query(self, llm: Pipeline, clean=True) -> str:
     def format_challenge(self, challenge) -> str:
         """Formats the challenge to be used for the conversation"""
         return challenge
-
diff --git a/prompting/utils/config.py b/prompting/utils/config.py
index 0abb1875..6093876e 100644
--- a/prompting/utils/config.py
+++ b/prompting/utils/config.py
@@ -22,6 +22,7 @@
 import bittensor as bt
 from loguru import logger
 
+
 def check_config(cls, config: "bt.Config"):
     r"""Checks/validates the config namespace object."""
     bt.logging.check_config(config)
@@ -35,7 +36,7 @@ def check_config(cls, config: "bt.Config"):
             config.neuron.name,
         )
     )
-    bt.logging.info(f'Logging path: {full_path}')
+    bt.logging.info(f"Logging path: {full_path}")
     config.neuron.full_path = os.path.expanduser(full_path)
     if not os.path.exists(config.neuron.full_path):
         os.makedirs(config.neuron.full_path, exist_ok=True)
@@ -130,6 +131,7 @@ def add_args(cls, parser):
         default="",
     )
 
+
 def add_miner_args(cls, parser):
     """Add miner specific arguments to the parser."""
 
@@ -137,7 +139,7 @@ def add_miner_args(cls, parser):
         "--neuron.name",
         type=str,
         help="Trials for this neuron go in neuron.root / (wallet_cold - wallet_hot) / neuron.name. ",
-        default='miner',
+        default="miner",
     )
 
     parser.add_argument(
@@ -179,7 +181,7 @@ def add_miner_args(cls, parser):
         "--neuron.system_prompt",
         type=str,
         help="The system prompt to use for the miner.",
-        default="You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
+        default="You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know.",
     )
 
     parser.add_argument(
@@ -222,7 +224,7 @@ def add_miner_args(cls, parser):
         type=bool,
         default=False,
         help="Force model loading independent of mock flag.",
-    ) 
+    )
 
     parser.add_argument(
         "--wandb.on",
@@ -245,6 +247,7 @@ def add_miner_args(cls, parser):
         help="Wandb project to log to.",
     )
 
+
 def add_validator_args(cls, parser):
     """Add validator specific arguments to the parser."""
 
@@ -252,7 +255,7 @@ def add_validator_args(cls, parser):
         "--neuron.name",
         type=str,
         help="Trials for this neuron go in neuron.root / (wallet_cold - wallet_hot) / neuron.name. ",
-        default='validator',
+        default="validator",
     )
 
     parser.add_argument(
@@ -334,9 +337,9 @@ def add_validator_args(cls, parser):
         "--neuron.vpermit_tao_limit",
         type=int,
         help="The maximum number of TAO allowed to query a validator with a vpermit.",
-            default=4096,
-        )
-    
+        default=4096,
+    )
+
     parser.add_argument(
         "--wandb.project_name",
         type=str,
@@ -351,20 +354,20 @@ def add_validator_args(cls, parser):
         default="opentensor-dev",
     )
 
-
     parser.add_argument(
         "--neuron.query_unique_coldkeys",
         action="store_true",
         help="Only query a single hotkey per coldkey.",
         default=False,
-        )
+    )
 
     parser.add_argument(
         "--neuron.query_unique_ips",
         action="store_true",
         help="Only query a single hotkey per ip.",
         default=False,
-        )
+    )
+
 
 def config(cls):
     """
diff --git a/prompting/utils/logging.py b/prompting/utils/logging.py
index 73df3009..381d68b6 100644
--- a/prompting/utils/logging.py
+++ b/prompting/utils/logging.py
@@ -9,6 +9,7 @@
 from loguru import logger
 import prompting
 
+
 @dataclass
 class Log:
     validator_model_id: str
@@ -24,6 +25,7 @@ class Log:
     task: dict
     # extra_info: dict
 
+
 def export_logs(logs: List[Log]):
     bt.logging.info("📝 Exporting logs...")
 
@@ -102,9 +104,7 @@ def reinit_wandb(self):
     init_wandb(self, reinit=True)
 
 
-
 def log_event(self, event):
-
     if not self.config.neuron.dont_save_events:
         logger.log("EVENTS", "events", **event)
 
diff --git a/prompting/utils/uids.py b/prompting/utils/uids.py
index 9376438e..5e4a2225 100644
--- a/prompting/utils/uids.py
+++ b/prompting/utils/uids.py
@@ -5,7 +5,11 @@
 
 
 def check_uid_availability(
-    metagraph: "bt.metagraph.Metagraph", uid: int, vpermit_tao_limit: int, coldkeys: set = None, ips: set = None,
+    metagraph: "bt.metagraph.Metagraph",
+    uid: int,
+    vpermit_tao_limit: int,
+    coldkeys: set = None,
+    ips: set = None,
 ) -> bool:
     """Check if uid is available. The UID should be available if it is serving and has less than vpermit_tao_limit stake
     Args:
@@ -21,10 +25,12 @@ def check_uid_availability(
     if not metagraph.axons[uid].is_serving:
         bt.logging.debug(f"uid: {uid} is not serving")
         return False
-      
+
     # Filter validator permit > 1024 stake.
     if metagraph.validator_permit[uid] and metagraph.S[uid] > vpermit_tao_limit:
-        bt.logging.debug(f"uid: {uid} has vpermit and stake ({metagraph.S[uid]}) > {vpermit_tao_limit}")
+        bt.logging.debug(
+            f"uid: {uid} has vpermit and stake ({metagraph.S[uid]}) > {vpermit_tao_limit}"
+        )
         return False
 
     if coldkeys and metagraph.axons[uid].coldkey in coldkeys:
@@ -37,9 +43,7 @@ def check_uid_availability(
     return True
 
 
-def get_random_uids(
-    self, k: int, exclude: List[int] = None
-) -> torch.LongTensor:
+def get_random_uids(self, k: int, exclude: List[int] = None) -> torch.LongTensor:
     """Returns k available random uids from the metagraph.
     Args:
         k (int): Number of uids to return.
@@ -58,7 +62,11 @@ def get_random_uids(
             continue
 
         uid_is_available = check_uid_availability(
-            self.metagraph, uid, self.config.neuron.vpermit_tao_limit, coldkeys, ips,
+            self.metagraph,
+            uid,
+            self.config.neuron.vpermit_tao_limit,
+            coldkeys,
+            ips,
         )
         if not uid_is_available:
             continue
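
A toy sketch of the selection flow in `get_random_uids`, with a hypothetical availability stub standing in for `check_uid_availability`:

```python
import torch

def toy_available(uid):
    # Hypothetical stand-in for check_uid_availability: pretend
    # even uids are serving and under the vpermit TAO limit.
    return uid % 2 == 0

candidate_uids = [uid for uid in range(10) if toy_available(uid)]
k = 3
# Sample k available uids without replacement, as a LongTensor.
uids = torch.tensor(candidate_uids)[torch.randperm(len(candidate_uids))[:k]]
```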

From d1255aae74c70122d22b716c3be6a4543706478a Mon Sep 17 00:00:00 2001
From: mccrindlebrian <mccrinbc@gmail.com>
Date: Wed, 28 Feb 2024 14:26:33 -0800
Subject: [PATCH 21/34] deprecation of agent miners

---
 neurons/.DS_Store               | Bin 0 -> 6148 bytes
 neurons/miners/.DS_Store        | Bin 0 -> 6148 bytes
 neurons/miners/agent/README.md  |  34 ---------------------
 neurons/miners/agent/miner.py   |  30 ------------------
 prompting/miners/agent_miner.py |  52 +++-----------------------------
 5 files changed, 4 insertions(+), 112 deletions(-)
 create mode 100644 neurons/.DS_Store
 create mode 100644 neurons/miners/.DS_Store
 delete mode 100644 neurons/miners/agent/README.md
 delete mode 100644 neurons/miners/agent/miner.py

diff --git a/neurons/.DS_Store b/neurons/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..3c9db54f1e0661e74902dfecc04465c457000f92
GIT binary patch
literal 6148
zcmeH~L5tHs6vy9mySph>=s{sU2m@Zr+HMy?yu|K$@M=U4Dlut_8_Z@(lWvhx$X&mX
zU&ODY|2H$M)~dG^mG|Jyf9B=A4EasSWQa(O7uk@gM??XfwbeoM6XWabTh{WCU8u76
zm{39!I-@DYTiFu$8wv2*b+KA8&FCH0?%Vpsj|($N3zH&)Uyq0I!_U^xBif^L%BjR!
z&#BtXRkwean=Y%FzwgI%R^(MZ7<^K#o%ZhDey|^OgICd+Sw?kIFRF1ef5D@t#uVvg
zKS`hE(`DSdabn6k$;;{77UbC!DKDSrWoDLRvnaF5)<*6^&<Q$m@78K{G#K^u=y<s9
z>(%gRbic2UNB7q2PH^+i-3L$Nx5asB-rGkch1c5Jb+4E50i6v17ty>dO!)?JuDM1R
zKv@7#2Cc~e(jg=M0Tj>kJyZ<Egn$qb0#{GKf82uZ)lVfe2mvARj}hSSgNCz)UK^+O
z(}Bhw0ibP!t)b69i(I3<hF%+|xB^qI6zWQqKVm3Xj=1)Dq1VQ#D<|a-AIi_H{0T+b
z*>Qhu(@BL+r4<4~V3WX(8}@krfAs6~|7MYlgn$tEuL!92X>>ZqBl*2`>*08>)$rGF
pHqL9Del0=AZpFyut@r?L4ROsKXy~<ZiU>@81S|$=guovq@C8J0b9w*(

literal 0
HcmV?d00001

diff --git a/neurons/miners/.DS_Store b/neurons/miners/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..71828c90e7db86e15626ee5f1f39d3d20b4e30e0
GIT binary patch
literal 6148
zcmeHKL2uJA6n^fqEnS7w14wW{io~@V?K&Z$T|zewTq%MBppvYijTY>xO{u1;Qtsow
zaORKjUpT?{Y*%X2_F7HIuX_F*`+d*J=f;kSNOY$C7Ezsud^n?bf?|ttKl_GNYzy;*
z&XH11J^W5-v=QwZoB~dP`=$WDyEXmJrX*?g{k?sYsp~jXF(P<*_z6G#s|~d!Yectn
zO%Z%Z3Fj%XUX@sfh#eqOQeKMO%s-?lVh6Y@iSnx+C0S>Qr=|Qg+{YGHJe!Q{v;2xj
zSvtv^&EKMO;2j<v`^SFG|J)s_SvQaJ>7)~lKl0T(rLuTgkK*@fGV9eJUMiVKsZ7S2
zkoFUVeEN{ezM6H^RQ4yD+ch12)vxyIPv-NpX1fuz&s&Q|FmIi;Up9jC_KU@$>OXq+
z{NiozCi^DU9jl6!6icbL2Nsv`1C<?*_q{uonUVuUF#CuU7GM@RqO>F!kFq+A9+vEc
zCgtf6f40Oa<jvOeRd~$Rd?z%+gx$b<yQX?$(vZU0{uQu)G{3h0q1}j10jIz{SAf@t
z0L~a%EDY+U1BE^U06jEoL!Cbsm=jnGEfxmR0~3Y{G*n@a7{bue9@x0hVqwtGN!Y`O
zup<k5LJ@j&<PUT>iO`@codQmQbp;O0Wu5o`*SpXE>m+yO6mSasR|*L4s(aPJBiX%m
y<KcL(4d7qkY#dh@)D#qUJJuE6iXX$Zp)cSHFtk`0L<`J)2xu8x;S|`b0{;Mgwzi=F

literal 0
HcmV?d00001

diff --git a/neurons/miners/agent/README.md b/neurons/miners/agent/README.md
deleted file mode 100644
index 0d0b8a7a..00000000
--- a/neurons/miners/agent/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# WikiAgent Bittensor Miner
-This repository contains a Bittensor Miner that uses a simple ReACT langchain agent to retrieve data from OpenAI's model alongside the wikipedia tool. The miner connects to the Bittensor network, registers its wallet, and serves the GPT model to the network.
-
-## Prerequisites
-
-- Python 3.8+
-- OpenAI Python API (https://github.com/openai/openai)
-
-## Installation
-
-1. Clone the repository 
-```bash
-git clone https://github.com/opentensor/prompting.git
-```
-
-2. Install the required packages for the [repository requirements](../../../requirements.txt) with `pip install -r requirements.txt`
-3. Install the required packages for the [wikipedia agent miner](requirements.txt) with `pip install -r requirements.txt`
-3. Ensure that you have a `.env` file with your `OPENAI_API` key
-```.env
-echo OPENAI_API_KEY=YOUR-KEY > .env
-```
-
-For more configuration options related to the wallet, axon, subtensor, logging, and metagraph, please refer to the Bittensor documentation.
-
-## Example Usage
-
-To run the WikiAgent Bittensor Miner with default settings, we recommend using the model `gpt-3.5-turbo-16k` or any model with a big context window. You can run the miner using the following command:
-
-```bash
-python3 neurons/miners/wiki_agent/miner.py \
-    --wallet.name <<your-wallet-name>> \
-    --wallet.hotkey <<your-hotkey>>
-    --neuron.model_id gpt-3.5-turbo-16k
-```
\ No newline at end of file
diff --git a/neurons/miners/agent/miner.py b/neurons/miners/agent/miner.py
deleted file mode 100644
index 267a8688..00000000
--- a/neurons/miners/agent/miner.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# The MIT License (MIT)
-# Copyright © 2024 Yuma Rao
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
-# the Software.
-
-# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
-# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-import time
-import bittensor as bt
-from prompting.miners import AgentMiner
-
-# This is the main function, which runs the miner.
-if __name__ == "__main__":
-    with AgentMiner() as miner:
-        while True:
-            bt.logging.info("Miner running...", time.time())
-            time.sleep(5)
-
-            if miner.should_exit:
-                bt.logging.warning("Ending miner...")
-                break
diff --git a/prompting/miners/agent_miner.py b/prompting/miners/agent_miner.py
index cd537be4..7d51bacd 100644
--- a/prompting/miners/agent_miner.py
+++ b/prompting/miners/agent_miner.py
@@ -18,6 +18,7 @@
 import time
 import bittensor as bt
 import argparse
+from deprecation import deprecated
 
 # Bittensor Miner Template:
 from prompting.protocol import PromptingSynapse
@@ -28,7 +29,7 @@
 from prompting.miners.agents import SingleActionAgent, ReactAgent
 from langchain.callbacks import get_openai_callback
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class AgentMiner(BasePromptingMiner):
     """Langchain-based miner which uses OpenAI's API as the LLM. This uses the ReAct framework.
 
@@ -108,50 +109,5 @@ def get_cost_logging(self, cb):
         }
 
     async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
-        """
-        Processes the incoming synapse by performing a predefined operation on the input data.
-        This method should be replaced with actual logic relevant to the miner's purpose.
-
-        Args:
-            synapse (PromptingSynapse): The synapse object containing the 'dummy_input' data.
-
-        Returns:
-            PromptingSynapse: The synapse object with the '`dummy_output' field set to twice the 'dummy_input' value.
-
-        The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
-        the miner's intended operation. This method demonstrates a basic transformation of input data.
-        """
-        try:
-            with get_openai_callback() as cb:
-                t0 = time.time()
-                bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
-
-                message = synapse.messages[-1]
-
-                bt.logging.debug(f"💬 Querying openai and wikipedia: {message}")
-
-                response = self.agent.run(message)
-
-                synapse.completion = response
-                synapse_latency = time.time() - t0
-
-                if self.config.wandb.on:
-                    self.log_event(
-                        timing=synapse_latency,
-                        prompt=message,
-                        completion=response,
-                        system_prompt="",
-                        extra_info=self.get_cost_logging(cb),
-                    )
-
-            bt.logging.debug(f"✅ Served Response: {response}")
-            self.step += 1
-
-            return synapse
-        except Exception as e:
-            bt.logging.error(f"Error in forward: {e}")
-            synapse.completion = "Error: " + str(e)
-        finally:
-            if self.config.neuron.stop_on_forward_exception:
-                self.should_exit = True
-            return synapse
+        self.should_exit = True
+        return synapse
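
A minimal usage sketch of the `deprecation` library applied here (pinned to 2.1.0 in the next patch); per the library's documented behavior, calling a decorated callable emits a `DeprecationWarning` carrying the version details:

```python
import warnings
from deprecation import deprecated

@deprecated(deprecated_in="1.1.2", removed_in="2.0",
            details="AgentMiner is unsupported.")
def legacy_forward():
    return "deprecated path"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # DeprecationWarning is hidden by default
    legacy_forward()
# caught[0].message mentions "1.1.2", "2.0" and the details string
```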

From 46f798e96a3ee9245f760539a1fc2e7a2e176c31 Mon Sep 17 00:00:00 2001
From: mccrindlebrian <mccrinbc@gmail.com>
Date: Wed, 28 Feb 2024 14:39:35 -0800
Subject: [PATCH 22/34] add deprecation to reqs

---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index 2f94f608..9185969d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,6 +3,7 @@ bittensor==6.6.0
 bs4
 click==8.1.3
 datasets==2.14.6
+deprecation==2.1.0
 torch==2.1.1
 torchmetrics
 transformers==4.36.2

From 51de23f948201e9d22716e24670d13058a5f78a6 Mon Sep 17 00:00:00 2001
From: mccrindlebrian <mccrinbc@gmail.com>
Date: Wed, 28 Feb 2024 14:41:27 -0800
Subject: [PATCH 23/34] add log_status

---
 neurons/miners/huggingface/miner.py | 2 +-
 prompting/base/prompting_miner.py   | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/neurons/miners/huggingface/miner.py b/neurons/miners/huggingface/miner.py
index d0c65891..29eaa640 100644
--- a/neurons/miners/huggingface/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -23,7 +23,7 @@
 if __name__ == "__main__":
     with HuggingFaceMiner() as miner:
         while True:
-            bt.logging.info("Miner running...", time.time())
+            miner.log_status()
             time.sleep(5)
 
             if miner.should_exit:
diff --git a/prompting/base/prompting_miner.py b/prompting/base/prompting_miner.py
index 2e1a123f..6c560f37 100644
--- a/prompting/base/prompting_miner.py
+++ b/prompting/base/prompting_miner.py
@@ -183,3 +183,9 @@ def log_event(
 
         bt.logging.info("Logging event to wandb...", step_log)
         wandb.log(step_log)
+
+    def log_status(self):
+        m = self.metagraph
+        bt.logging.info(
+            f"Miner running:: network: {self.subtensor.network} | block: {self.block} | step: {self.step} | uid: {self.uid} | last updated: {self.block-m.last_update[self.uid]} | trust: {m.trust[self.uid]:.3f} | emission {m.emission[self.uid]:.3f}"
+        )

From 39d8ad02cf697790c96a94dcc338b20c3aa192c4 Mon Sep 17 00:00:00 2001
From: mccrindlebrian <mccrinbc@gmail.com>
Date: Wed, 28 Feb 2024 14:46:24 -0800
Subject: [PATCH 24/34] change version to 1.1.2

---
 prompting/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prompting/__init__.py b/prompting/__init__.py
index f7cfdd47..e3b13d0f 100644
--- a/prompting/__init__.py
+++ b/prompting/__init__.py
@@ -16,7 +16,7 @@
 # DEALINGS IN THE SOFTWARE.
 
 # Define the version of the template module.
-__version__ = "1.1.1"
+__version__ = "1.1.2"
 version_split = __version__.split(".")
 __spec_version__ = (
     (10000 * int(version_split[0]))
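
A worked example for the spec version, assuming the truncated expression above continues with the conventional `100 * minor + 1 * patch` terms (only the `10000 * major` term is visible in this hunk):

```python
# Assumed continuation of the truncated __spec_version__ expression.
version_split = "1.1.2".split(".")
spec_version = (
    (10000 * int(version_split[0]))
    + (100 * int(version_split[1]))
    + (1 * int(version_split[2]))
)
assert spec_version == 10102
```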

From 493e7e4a29276179281c76e9a2789b22bee49ce9 Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 12:58:09 -0500
Subject: [PATCH 25/34] Update prompting/miners/agents/react_agent.py

Co-authored-by: Steffen Cruz <steffenjcruz@gmail.com>
---
 prompting/miners/agents/react_agent.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
index c6e7358c..af106f1e 100644
--- a/prompting/miners/agents/react_agent.py
+++ b/prompting/miners/agents/react_agent.py
@@ -23,7 +23,7 @@ def __init__(
             Tool(
                 name="Wikipedia",
                 func=self.wikipedia.run,
-                description="Useful for when you need to look up a topic, country or person on wikipedia",
+                description="Useful for when you need to look up a topic, event, country or person on wikipedia",
             )
         ]
 

From 7226fa1e61cc001154332c88346b684fc4fcb043 Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 13:30:20 -0500
Subject: [PATCH 26/34] Update prompting/miners/openai_miner.py

Co-authored-by: Steffen Cruz <steffenjcruz@gmail.com>
---
 prompting/miners/openai_miner.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
index c885e142..5a0e6ee9 100644
--- a/prompting/miners/openai_miner.py
+++ b/prompting/miners/openai_miner.py
@@ -102,7 +102,7 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
             synapse (PromptingSynapse): The synapse object containing the 'dummy_input' data.
 
         Returns:
-            PromptingSynapse: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value.
+            PromptingSynapse: The synapse object with the 'completion' field set to the miner output
 
         The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
         the miner's intended operation. This method demonstrates a basic transformation of input data.

From 6d34528dc4012a5cf7d9db57f3d6b69e2a7e7a96 Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 13:43:57 -0500
Subject: [PATCH 27/34] update docstring

---
 prompting/miners/openai_miner.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
index 5a0e6ee9..3ccca277 100644
--- a/prompting/miners/openai_miner.py
+++ b/prompting/miners/openai_miner.py
@@ -103,9 +103,6 @@ async def forward(self, synapse: PromptingSynapse) -> PromptingSynapse:
 
         Returns:
             PromptingSynapse: The synapse object with the 'completion' field set to the miner output
-
-        The 'forward' function is a placeholder and should be overridden with logic that is appropriate for
-        the miner's intended operation. This method demonstrates a basic transformation of input data.
         """
         try:
             with get_openai_callback() as cb:

From 70ab9323021167f26483b6691c4a7c56b51b2e06 Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 13:52:16 -0500
Subject: [PATCH 28/34] adds deprecation tags to all agent classes

---
 prompting/miners/agents/base_agent.py          | 2 +-
 prompting/miners/agents/react_agent.py         | 2 +-
 prompting/miners/agents/single_action_agent.py | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/prompting/miners/agents/base_agent.py b/prompting/miners/agents/base_agent.py
index 62e546bf..44266251 100644
--- a/prompting/miners/agents/base_agent.py
+++ b/prompting/miners/agents/base_agent.py
@@ -1,6 +1,6 @@
 from abc import ABC
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class BaseAgent(ABC):
     def run(self, input: str) -> str:
         pass
diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
index af106f1e..cc77ede3 100644
--- a/prompting/miners/agents/react_agent.py
+++ b/prompting/miners/agents/react_agent.py
@@ -8,7 +8,7 @@
 from langchain.agents import Tool
 from langchain.tools import WikipediaQueryRun
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class ReactAgent(BaseAgent):
     def __init__(
         self,
diff --git a/prompting/miners/agents/single_action_agent.py b/prompting/miners/agents/single_action_agent.py
index 825f7565..7d894802 100644
--- a/prompting/miners/agents/single_action_agent.py
+++ b/prompting/miners/agents/single_action_agent.py
@@ -44,7 +44,7 @@
 Question: {input}
 {agent_scratchpad}"""
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 # Set up a prompt template
 class CustomPromptTemplate(StringPromptTemplate):
     # The template to use
@@ -70,7 +70,7 @@ def format(self, **kwargs) -> str:
         kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
         return self.template.format(**kwargs)
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class CustomOutputParser(AgentOutputParser):
     def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
         # Check if agent should finish
@@ -93,7 +93,7 @@ def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
             tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
         )
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class SingleActionAgent(BaseAgent):
     def __init__(
         self,

From 1ad95719c327c3e7e02184a98296e9fe5b867964 Mon Sep 17 00:00:00 2001
From: Steffen Cruz <steffenjcruz@gmail.com>
Date: Fri, 1 Mar 2024 13:04:59 -0600
Subject: [PATCH 29/34] Manually remove .DS_Store files

---
 neurons/.DS_Store        | Bin 6148 -> 0 bytes
 neurons/miners/.DS_Store | Bin 6148 -> 0 bytes
 prompting/.DS_Store      | Bin 8196 -> 0 bytes
 3 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 neurons/.DS_Store
 delete mode 100644 neurons/miners/.DS_Store
 delete mode 100644 prompting/.DS_Store

diff --git a/neurons/.DS_Store b/neurons/.DS_Store
deleted file mode 100644
index 3c9db54f1e0661e74902dfecc04465c457000f92..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6148
zcmeH~L5tHs6vy9mySph>=s{sU2m@Zr+HMy?yu|K$@M=U4Dlut_8_Z@(lWvhx$X&mX
zU&ODY|2H$M)~dG^mG|Jyf9B=A4EasSWQa(O7uk@gM??XfwbeoM6XWabTh{WCU8u76
zm{39!I-@DYTiFu$8wv2*b+KA8&FCH0?%Vpsj|($N3zH&)Uyq0I!_U^xBif^L%BjR!
z&#BtXRkwean=Y%FzwgI%R^(MZ7<^K#o%ZhDey|^OgICd+Sw?kIFRF1ef5D@t#uVvg
zKS`hE(`DSdabn6k$;;{77UbC!DKDSrWoDLRvnaF5)<*6^&<Q$m@78K{G#K^u=y<s9
z>(%gRbic2UNB7q2PH^+i-3L$Nx5asB-rGkch1c5Jb+4E50i6v17ty>dO!)?JuDM1R
zKv@7#2Cc~e(jg=M0Tj>kJyZ<Egn$qb0#{GKf82uZ)lVfe2mvARj}hSSgNCz)UK^+O
z(}Bhw0ibP!t)b69i(I3<hF%+|xB^qI6zWQqKVm3Xj=1)Dq1VQ#D<|a-AIi_H{0T+b
z*>Qhu(@BL+r4<4~V3WX(8}@krfAs6~|7MYlgn$tEuL!92X>>ZqBl*2`>*08>)$rGF
pHqL9Del0=AZpFyut@r?L4ROsKXy~<ZiU>@81S|$=guovq@C8J0b9w*(

diff --git a/neurons/miners/.DS_Store b/neurons/miners/.DS_Store
deleted file mode 100644
index 71828c90e7db86e15626ee5f1f39d3d20b4e30e0..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6148
zcmeHKL2uJA6n^fqEnS7w14wW{io~@V?K&Z$T|zewTq%MBppvYijTY>xO{u1;Qtsow
zaORKjUpT?{Y*%X2_F7HIuX_F*`+d*J=f;kSNOY$C7Ezsud^n?bf?|ttKl_GNYzy;*
z&XH11J^W5-v=QwZoB~dP`=$WDyEXmJrX*?g{k?sYsp~jXF(P<*_z6G#s|~d!Yectn
zO%Z%Z3Fj%XUX@sfh#eqOQeKMO%s-?lVh6Y@iSnx+C0S>Qr=|Qg+{YGHJe!Q{v;2xj
zSvtv^&EKMO;2j<v`^SFG|J)s_SvQaJ>7)~lKl0T(rLuTgkK*@fGV9eJUMiVKsZ7S2
zkoFUVeEN{ezM6H^RQ4yD+ch12)vxyIPv-NpX1fuz&s&Q|FmIi;Up9jC_KU@$>OXq+
z{NiozCi^DU9jl6!6icbL2Nsv`1C<?*_q{uonUVuUF#CuU7GM@RqO>F!kFq+A9+vEc
zCgtf6f40Oa<jvOeRd~$Rd?z%+gx$b<yQX?$(vZU0{uQu)G{3h0q1}j10jIz{SAf@t
z0L~a%EDY+U1BE^U06jEoL!Cbsm=jnGEfxmR0~3Y{G*n@a7{bue9@x0hVqwtGN!Y`O
zup<k5LJ@j&<PUT>iO`@codQmQbp;O0Wu5o`*SpXE>m+yO6mSasR|*L4s(aPJBiX%m
y<KcL(4d7qkY#dh@)D#qUJJuE6iXX$Zp)cSHFtk`0L<`J)2xu8x;S|`b0{;Mgwzi=F

diff --git a/prompting/.DS_Store b/prompting/.DS_Store
deleted file mode 100644
index d31696ad021d79b0cc8d779e05478967f796cf25..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 8196
zcmeHML2uJA82#LJYq|>9VMybE6p8CpI<X-Tmr}L^2QDju1E7+uVT+c<Rhq7vs!F}X
zZ{RO*=7+$4;RNsdoK<O>ajcNAEBiUgd-?2VC!RARQk_wMk7$R89Aw6=28xoz*Euhx
zN_J)&sGuhbDWx8MO&Tn*?Fs9Eb-+4c9k32q2mS>Iux4{|ZdmutRa;sItONh01N?q)
zkr~?=&J4<{1BGk>fK{B9ggW*CqH!~9XE-w`Qry$*9#ke(*%m_?Ij*}*2evbu88mWI
zMoub=tg;=7lIYO6LMPQWXiMvWbzs>6-n%zs&mK~|uz$aL8R-*0(mojcP(4R({+5K=
zwQ4|DbV45T83lZwg6dmA<$!w%W{fWd{FR^(Mcps(H^3j_7l1!pytn%NH?BV(4zkvN
z_XklpOk1s=s<KvH->5k?r{R3)p6PKn_0rL><0bF-=(X07e_i+dH(@aDZEe5Maq5L}
zkcc4c2cW!v8^(P-?&wk6ABwhXE;x0k-rKr2ne0D$yzA~Cw5Gf6WUuw4g@dOD(`ntg
z`{3cT<KB665o_)MlFHs`bjuvi%P*)c7;xE5;z-A*EJCi5FZKilV}9XUeyAJ_PXr4+
z*d9<)G_u0?%k%Gmufe@EZ0J%<4L(ycWR)KUb%XlQ#V8Y*P(hhWrqkElblRj(h>*}2
z(GqA&A`wrfyk-Nvis+(@D-$Wtt%0Y}uk%tik3^omiK!`0Nw2+94ZEDzG@wbqPbH2<
z%S!qnUguSHI;JDIaY(O#O|-JVIGW;bUQt7oVD3}lp<rLt?VPXcXb(R^=fzQHH<y?i
zZI6mf_Oien$&6TL!p8+BPt!JhVw@S|iOa`k0k`YF*kKFnz{(w1GZA+hz@wYL|F67V
zHlTIDI<PtiRQ0fX*g-tAG%p9{rP@XQh|Gz3GlL>QA*(r1w&TG0KMYZKL1j%l!<j*x
QL9%}chz+)|4y@FHKb>?v_y7O^


From 41fd1409811a93174edcdcd786cae309c6069b96 Mon Sep 17 00:00:00 2001
From: Steffen Cruz <steffenjcruz@gmail.com>
Date: Fri, 1 Mar 2024 13:05:20 -0600
Subject: [PATCH 30/34] Remove .DS_Store in all directories

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index c894ee73..5dfe5518 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,8 @@ __pycache__/
 *.py[cod]
 *$py.class
 .DS_Store
+**/.DS_Store
+
 
 # C extensions
 *.so
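
Note that a slash-free pattern such as `.DS_Store` already matches in any directory, so `**/.DS_Store` mainly makes the intent explicit. A complementary sketch for purging files already on disk (the ignore rule only prevents new ones from being tracked):

```python
import pathlib

# Delete every .DS_Store under the current directory tree.
for ds_store in pathlib.Path(".").rglob(".DS_Store"):
    ds_store.unlink()
```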

From 9186c8be482203e6be73c66bbf859e73f152a48b Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 14:44:05 -0500
Subject: [PATCH 31/34] fix flake8 warnings

---
 prompting/base/prompting_miner.py              | 2 +-
 prompting/miners/agents/base_agent.py          | 1 +
 prompting/miners/agents/react_agent.py         | 1 +
 prompting/miners/agents/single_action_agent.py | 1 +
 4 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/prompting/base/prompting_miner.py b/prompting/base/prompting_miner.py
index 8c571cd7..fe552efe 100644
--- a/prompting/base/prompting_miner.py
+++ b/prompting/base/prompting_miner.py
@@ -193,7 +193,7 @@ def log_status(self):
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
-    with Miner() as miner:
+    with BasePromptingMiner() as miner:
         while True:
             miner.log_status()
             time.sleep(5)
diff --git a/prompting/miners/agents/base_agent.py b/prompting/miners/agents/base_agent.py
index 44266251..7335f304 100644
--- a/prompting/miners/agents/base_agent.py
+++ b/prompting/miners/agents/base_agent.py
@@ -1,4 +1,5 @@
 from abc import ABC
+from deprecation import deprecated
 
 @deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class BaseAgent(ABC):
diff --git a/prompting/miners/agents/react_agent.py b/prompting/miners/agents/react_agent.py
index cc77ede3..09037821 100644
--- a/prompting/miners/agents/react_agent.py
+++ b/prompting/miners/agents/react_agent.py
@@ -7,6 +7,7 @@
 from langchain.utilities import WikipediaAPIWrapper
 from langchain.agents import Tool
 from langchain.tools import WikipediaQueryRun
+from deprecation import deprecated
 
 @deprecated(deprecated_in="1.1.2", removed_in="2.0", details="AgentMiner is unsupported.")
 class ReactAgent(BaseAgent):
diff --git a/prompting/miners/agents/single_action_agent.py b/prompting/miners/agents/single_action_agent.py
index 7d894802..b82731cb 100644
--- a/prompting/miners/agents/single_action_agent.py
+++ b/prompting/miners/agents/single_action_agent.py
@@ -24,6 +24,7 @@
     AgentOutputParser,
 )
 from langchain.tools import WikipediaQueryRun
+from deprecation import deprecated
 
 # Set up the base template
 template = """Answer the following questions as best you can. You have access to the following tools:

From 7719a480c2568a9ff6e59ee641b65f8fbb60f290 Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 15:29:43 -0500
Subject: [PATCH 32/34] drops unnecessary run code for base miner

---
 prompting/base/prompting_miner.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/prompting/base/prompting_miner.py b/prompting/base/prompting_miner.py
index fe552efe..0ee53337 100644
--- a/prompting/base/prompting_miner.py
+++ b/prompting/base/prompting_miner.py
@@ -189,15 +189,3 @@ def log_status(self):
         bt.logging.info(
             f"Miner running:: network: {self.subtensor.network} | step: {self.step} | uid: {self.uid} | trust: {m.trust[self.uid]:.3f} | emission {m.emission[self.uid]:.3f}"
         )
-
-
-# This is the main function, which runs the miner.
-if __name__ == "__main__":
-    with BasePromptingMiner() as miner:
-        while True:
-            miner.log_status()
-            time.sleep(5)
-
-            if miner.should_exit:
-                bt.logging.warning("Ending miner...")
-                break

From 10fc0ce01953203f650cc84bebc2d09e65faff33 Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 15:30:19 -0500
Subject: [PATCH 33/34] Update prompting/miners/openai_miner.py

Co-authored-by: Steffen Cruz <steffenjcruz@gmail.com>
---
 prompting/miners/openai_miner.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
index 3ccca277..818b3cf2 100644
--- a/prompting/miners/openai_miner.py
+++ b/prompting/miners/openai_miner.py
@@ -35,7 +35,7 @@
 
 class OpenAIMiner(BasePromptingMiner):
     """Langchain-based miner which uses OpenAI's API as the LLM.
-
+    This miner does not use any tools or external APIs when processing requests - it relies entirely on the model's own representation and world model. In some cases, this can produce lower-quality results.
     You should also install the dependencies for this miner, which can be found in the requirements.txt file in this directory.
     """
 

From acba24947e2e71265ceac831ed8897116fa3658a Mon Sep 17 00:00:00 2001
From: p-ferreira <38992619+p-ferreira@users.noreply.github.com>
Date: Fri, 1 Mar 2024 15:32:10 -0500
Subject: [PATCH 34/34] deprecates tool miner

---
 prompting/miners/tool_miner.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/prompting/miners/tool_miner.py b/prompting/miners/tool_miner.py
index 321a97ad..f16479dd 100644
--- a/prompting/miners/tool_miner.py
+++ b/prompting/miners/tool_miner.py
@@ -16,8 +16,9 @@
 from langchain.prompts import ChatPromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from traceback import print_exception
+from deprecation import deprecated
 
-
+@deprecated(deprecated_in="1.1.2", removed_in="2.0", details="ToolMiner is unsupported.")
 class ToolMiner(BasePromptingMiner):
     @classmethod
     def add_args(cls, parser: argparse.ArgumentParser):