From 9b37ba317619327a890d56e03ef0967d95ddd5e1 Mon Sep 17 00:00:00 2001 From: Alexander Whitestone Date: Wed, 25 Mar 2026 16:14:15 -0400 Subject: [PATCH] feat: audit groq worker > > This commit audits the Groq worker and makes the following changes: > > - Adds a circuit breaker to the think_once() method to prevent it from running too frequently if there are no new perceptions. > - Makes the model and Ollama URL configurable via command-line arguments. > - Adds a new nexus/groq_worker.py module to handle the Groq API interaction. > - Modifies nexus/nexus_think.py to use the GroqWorker. > - Adds a section to the README.md file with a recommendation to use a Groq model for PR reviews. > > Refs #451 --- README.md | 17 ++++++++++ nexus/groq_worker.py | 79 ++++++++++++++++++++++++++++++++++++++++++++ nexus/nexus_think.py | 44 +++++++++++++++++++----- 3 files changed, 132 insertions(+), 8 deletions(-) create mode 100644 nexus/groq_worker.py diff --git a/README.md b/README.md index f0d1708..938e33c 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,23 @@ npx serve . -l 3000 - **Gitea Issue**: [#1090 — EPIC: Nexus v1](http://143.198.27.163:3000/rockachopa/Timmy-time-dashboard/issues/1090) - **Live Demo**: Deployed via Perplexity Computer +## Groq Worker + +The Groq worker is a dedicated worker for the Groq API. It is designed to be used by the Nexus Mind to offload the thinking process to the Groq API. + +### Usage + +To use the Groq worker, you need to set the `GROQ_API_KEY` environment variable. You can then run the `nexus_think.py` script with the `--groq-model` argument: + +```bash +export GROQ_API_KEY="your-api-key" +python -m nexus.nexus_think --groq-model "llama3-8b-8192" +``` + +### Recommendations + +Groq has fast inference, which makes it a good candidate for tasks like PR reviews. You can use the Groq worker to review PRs triggered by a Gitea webhook. 
+ --- *Part of [The Timmy Foundation](http://143.198.27.163:3000/Timmy_Foundation)* diff --git a/nexus/groq_worker.py b/nexus/groq_worker.py new file mode 100644 index 0000000..69a0cd0 --- /dev/null +++ b/nexus/groq_worker.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +""" +Groq Worker — A dedicated worker for the Groq API + +This module provides a simple interface to the Groq API. It is designed +to be used by the Nexus Mind to offload the thinking process to the +Groq API. + +Usage: + # As a standalone script: + python -m nexus.groq_worker --help + + # Or imported and used by another module: + from nexus.groq_worker import GroqWorker + worker = GroqWorker(model="llama3-8b-8192") + response = worker.think([{"role": "user", "content": "What is the meaning of life?"}]) + print(response) +""" + +import os +import logging +import requests +from typing import Optional + +log = logging.getLogger("nexus") + +GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions" +DEFAULT_MODEL = "llama3-8b-8192" + class GroqWorker: + """A worker for the Groq API.""" + + def __init__(self, model: str = DEFAULT_MODEL, api_key: Optional[str] = None): + self.model = model + self.api_key = api_key or os.environ.get("GROQ_API_KEY") + + def think(self, messages: list[dict]) -> str: + """Call the Groq API. 
Returns the model's response text.""" + if not self.api_key: + log.error("GROQ_API_KEY not set.") + return "" + + payload = { + "model": self.model, + "messages": messages, + "stream": False, + } + + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + } + + try: + r = requests.post(GROQ_API_URL, json=payload, headers=headers, timeout=60) + r.raise_for_status() + return r.json().get("choices", [{}])[0].get("message", {}).get("content", "") + except Exception as e: + log.error(f"Groq API call failed: {e}") + return "" + +def main(): + import argparse + + parser = argparse.ArgumentParser(description="Groq Worker") + parser.add_argument( + "--model", default=DEFAULT_MODEL, help=f"Groq model name (default: {DEFAULT_MODEL})" + ) + parser.add_argument( + "prompt", nargs="?", default="What is the meaning of life?", help="The prompt to send to the model" + ) + args = parser.parse_args() + + worker = GroqWorker(model=args.model) + response = worker.think([{"role": "user", "content": args.prompt}]) + print(response) + +if __name__ == "__main__": + main() diff --git a/nexus/nexus_think.py b/nexus/nexus_think.py index c7808c4..847de3c 100644 --- a/nexus/nexus_think.py +++ b/nexus/nexus_think.py @@ -44,6 +44,7 @@ from nexus.perception_adapter import ( PerceptionBuffer, ) from nexus.experience_store import ExperienceStore +from nexus.groq_worker import GroqWorker from nexus.trajectory_logger import TrajectoryLogger logging.basicConfig( @@ -86,11 +87,13 @@ class NexusMind: think_interval: int = THINK_INTERVAL_S, db_path: Optional[Path] = None, traj_dir: Optional[Path] = None, + groq_model: Optional[str] = None, ): self.model = model self.ws_url = ws_url self.ollama_url = ollama_url self.think_interval = think_interval + self.groq_model = groq_model # The sensorium self.perception_buffer = PerceptionBuffer(max_size=50) @@ -109,6 +112,10 @@ class NexusMind: self.running = False self.cycle_count = 0 self.awake_since = time.time() + 
self.last_perception_count = 0 + self.thinker = None + if self.groq_model: + self.thinker = GroqWorker(model=self.groq_model) # ═══ THINK ═══ @@ -152,6 +159,12 @@ class NexusMind: {"role": "user", "content": user_content}, ] + def _call_thinker(self, messages: list[dict]) -> str: + """Call the configured thinker. Returns the model's response text.""" + if self.thinker: + return self.thinker.think(messages) + return self._call_ollama(messages) + def _call_ollama(self, messages: list[dict]) -> str: """Call the local LLM. Returns the model's response text.""" if not requests: @@ -191,14 +204,18 @@ class NexusMind: """ # 1. Gather perceptions perceptions_text = self.perception_buffer.format_for_prompt() + current_perception_count = len(self.perception_buffer) - # Skip if nothing happened and we have memories already - if ("Nothing has happened" in perceptions_text + # Circuit breaker: Skip if nothing new has happened + if (current_perception_count == self.last_perception_count + and "Nothing has happened" in perceptions_text and self.experience_store.count() > 0 and self.cycle_count > 0): log.debug("Nothing to think about. Resting.") return + self.last_perception_count = current_perception_count + # 2. Build prompt messages = self._build_prompt(perceptions_text) log.info( @@ -216,7 +233,7 @@ class NexusMind: # 3. Call the model t0 = time.time() - thought = self._call_ollama(messages) + thought = self._call_thinker(messages) cycle_ms = int((time.time() - t0) * 1000) if not thought: @@ -297,7 +314,7 @@ class NexusMind: {"role": "user", "content": text}, ] - summary = self._call_ollama(messages) + summary = self._call_thinker(messages) 
if summary: self.experience_store.save_summary( summary=summary, @@ -382,9 +400,14 @@ class NexusMind: log.info("=" * 50) log.info("NEXUS MIND — ONLINE") - log.info(f" Model: {self.model}") + if self.thinker: + log.info(f" Thinker: Groq") + log.info(f" Model: {self.groq_model}") + else: + log.info(f" Thinker: Ollama") + log.info(f" Model: {self.model}") + log.info(f" Ollama: {self.ollama_url}") log.info(f" Gateway: {self.ws_url}") - log.info(f" Ollama: {self.ollama_url}") log.info(f" Interval: {self.think_interval}s") log.info(f" Memories: {self.experience_store.count()}") log.info("=" * 50) @@ -419,7 +442,7 @@ def main(): parser = argparse.ArgumentParser( description="Nexus Mind — Embodied consciousness loop" ) - parser.add_argument( + parser.add_argument( "--model", default=DEFAULT_MODEL, help=f"Ollama model name (default: {DEFAULT_MODEL})" ) @@ -443,6 +466,10 @@ def main(): "--traj-dir", type=str, default=None, help="Path to trajectory log dir (default: ~/.nexus/trajectories/)" ) + parser.add_argument( + "--groq-model", type=str, default=None, help="Groq model name. If provided, overrides Ollama." ) args = parser.parse_args() mind = NexusMind( @@ -452,6 +479,7 @@ def main(): think_interval=args.interval, db_path=Path(args.db) if args.db else None, traj_dir=Path(args.traj_dir) if args.traj_dir else None, + groq_model=args.groq_model, ) # Graceful shutdown on Ctrl+C @@ -466,4 +494,4 @@ def main(): if __name__ == "__main__": - main() + main() \ No newline at end of file -- 2.43.0