Compare commits

...

3 Commits

Author SHA1 Message Date
Alexander Whitestone
d7abb7db36 chore: checkpoint local wip for issue 892 2026-04-05 13:31:06 -04:00
Alexander Whitestone
f8f5d08678 feat: Implement NIP-89 and NIP-90 for Nostr agent partnerships
This commit introduces a new NostrClient for interacting with the Nostr
network. The client implements the basic functionality for NIP-89
(discovery of agent capabilities) and NIP-90 (job delegation).

The following changes are included:

- A new `NostrClient` class that can connect to relays, subscribe to
  events, and publish events.
- Implementation of capability discovery (NIP-89) to discover agent
  capability cards.
- Implementation of job delegation (NIP-90) to create and publish
  job requests.
- Added `websockets` and `pynostr` as dependencies.
- Added tests for the `NostrClient`.

Refs #892
2026-03-23 22:07:43 -04:00
0b4ed1b756 [claude] feat: enforce 3-issue cap on Kimi delegation (#1304) (#1310)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
2026-03-24 02:00:34 +00:00
54 changed files with 728 additions and 155 deletions

View File

@@ -1,6 +1,12 @@
#!/usr/bin/env python3
"""Tiny auth gate for nginx auth_request. Sets a cookie after successful basic auth."""
import hashlib, hmac, http.server, time, base64, os, sys
import base64
import hashlib
import hmac
import http.server
import os
import sys
import time
SECRET = os.environ.get("AUTH_GATE_SECRET", "")
USER = os.environ.get("AUTH_GATE_USER", "")

View File

@@ -1,5 +1,4 @@
import os
import sys
from pathlib import Path
@@ -8,6 +7,7 @@ sys.path.insert(0, str(Path(__file__).parent / "src"))
from timmy.memory_system import memory_store
def index_research_documents():
research_dir = Path("docs/research")
if not research_dir.is_dir():

View File

@@ -1,9 +1,7 @@
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from sqlalchemy import engine_from_config, pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
@@ -19,7 +17,7 @@ if config.config_file_name is not None:
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from src.dashboard.models.database import Base
from src.dashboard.models.calm import Task, JournalEntry
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,

View File

@@ -5,17 +5,16 @@ Revises:
Create Date: 2026-03-02 10:57:55.537090
"""
from typing import Sequence, Union
from collections.abc import Sequence
from alembic import op
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '0093c15b4bbf'
down_revision: Union[str, Sequence[str], None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
down_revision: str | Sequence[str] | None = None
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:

125
poetry.lock generated
View File

@@ -752,10 +752,9 @@ pycparser = {version = "*", markers = "implementation_name != \"PyPy\""}
name = "charset-normalizer"
version = "3.4.4"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = true
optional = false
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"voice\" or extra == \"research\""
files = [
{file = "charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d"},
{file = "charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8"},
@@ -942,6 +941,67 @@ prompt-toolkit = ">=3.0.36"
[package.extras]
testing = ["pytest (>=7.2.1)", "pytest-cov (>=4.0.0)", "tox (>=4.4.3)"]
[[package]]
name = "coincurve"
version = "21.0.0"
description = "Safest and fastest Python library for secp256k1 elliptic curve operations"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "coincurve-21.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:986727bba6cf0c5670990358dc6af9a54f8d3e257979b992a9dbd50dd82fa0dc"},
{file = "coincurve-21.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1c584059de61ed16c658e7eae87ee488e81438897dae8fabeec55ef408af474"},
{file = "coincurve-21.0.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4210b35c922b2b36c987a48c0b110ab20e490a2d6a92464ca654cb09e739fcc"},
{file = "coincurve-21.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf67332cc647ef52ef371679c76000f096843ae266ae6df5e81906eb6463186b"},
{file = "coincurve-21.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997607a952913c6a4bebe86815f458e77a42467b7a75353ccdc16c3336726880"},
{file = "coincurve-21.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cfdd0938f284fb147aa1723a69f8794273ec673b10856b6e6f5f63fcc99d0c2e"},
{file = "coincurve-21.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:88c1e3f6df2f2fbe18152c789a18659ee0429dc604fc77530370c9442395f681"},
{file = "coincurve-21.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:530b58ed570895612ef510e28df5e8a33204b03baefb5c986e22811fa09622ef"},
{file = "coincurve-21.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:f920af756a98edd738c0cfa431e81e3109aeec6ffd6dffb5ed4f5b5a37aacba8"},
{file = "coincurve-21.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:070e060d0d57b496e68e48b39d5e3245681376d122827cb8e09f33669ff8cf1b"},
{file = "coincurve-21.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:65ec42cab9c60d587fb6275c71f0ebc580625c377a894c4818fb2a2b583a184b"},
{file = "coincurve-21.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5828cd08eab928db899238874d1aab12fa1236f30fe095a3b7e26a5fc81df0a3"},
{file = "coincurve-21.0.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54de1cac75182de9f71ce41415faafcaf788303e21cbd0188064e268d61625e5"},
{file = "coincurve-21.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cda058d9394bea30d57a92fdc18ee3ca6b5bc8ef776a479a2ffec917105836"},
{file = "coincurve-21.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9070804d7c71badfe4f0bf19b728cfe7c70c12e733938ead6b1db37920b745c0"},
{file = "coincurve-21.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:669ab5db393637824b226de058bb7ea0cb9a0236e1842d7b22f74d4a8a1f1ff1"},
{file = "coincurve-21.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3bcd538af097b3914ec3cb654262e72e224f95f2e9c1eb7fbd75d843ae4e528e"},
{file = "coincurve-21.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:45b6a5e6b5536e1f46f729829d99ce1f8f847308d339e8880fe7fa1646935c10"},
{file = "coincurve-21.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:87597cf30dfc05fa74218810776efacf8816813ab9fa6ea1490f94e9f8b15e77"},
{file = "coincurve-21.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:b992d1b1dac85d7f542d9acbcf245667438839484d7f2b032fd032256bcd778e"},
{file = "coincurve-21.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f60ad56113f08e8c540bb89f4f35f44d434311433195ffff22893ccfa335070c"},
{file = "coincurve-21.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1cb1cd19fb0be22e68ecb60ad950b41f18b9b02eebeffaac9391dc31f74f08f2"},
{file = "coincurve-21.0.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05d7e255a697b3475d7ae7640d3bdef3d5bc98ce9ce08dd387f780696606c33b"},
{file = "coincurve-21.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a366c314df7217e3357bb8c7d2cda540b0bce180705f7a0ce2d1d9e28f62ad4"},
{file = "coincurve-21.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b04778b75339c6e46deb9ae3bcfc2250fbe48d1324153e4310fc4996e135715"},
{file = "coincurve-21.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8efcbdcd50cc219989a2662e6c6552f455efc000a15dd6ab3ebf4f9b187f41a3"},
{file = "coincurve-21.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6df44b4e3b7acdc1453ade52a52e3f8a5b53ecdd5a06bd200f1ec4b4e250f7d9"},
{file = "coincurve-21.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bcc0831f07cb75b91c35c13b1362e7b9dc76c376b27d01ff577bec52005e22a8"},
{file = "coincurve-21.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:5dd7b66b83b143f3ad3861a68fc0279167a0bae44fe3931547400b7a200e90b1"},
{file = "coincurve-21.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:78dbe439e8cb22389956a4f2f2312813b4bd0531a0b691d4f8e868c7b366555d"},
{file = "coincurve-21.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9df5ceb5de603b9caf270629996710cf5ed1d43346887bc3895a11258644b65b"},
{file = "coincurve-21.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:154467858d23c48f9e5ab380433bc2625027b50617400e2984cc16f5799ab601"},
{file = "coincurve-21.0.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f57f07c44d14d939bed289cdeaba4acb986bba9f729a796b6a341eab1661eedc"},
{file = "coincurve-21.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fb03e3a388a93d31ed56a442bdec7983ea404490e21e12af76fb1dbf097082a"},
{file = "coincurve-21.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d09ba4fd9d26b00b06645fcd768c5ad44832a1fa847ebe8fb44970d3204c3cb7"},
{file = "coincurve-21.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1a1e7ee73bc1b3bcf14c7b0d1f44e6485785d3b53ef7b16173c36d3cefa57f93"},
{file = "coincurve-21.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ad05952b6edc593a874df61f1bc79db99d716ec48ba4302d699e14a419fe6f51"},
{file = "coincurve-21.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4d2bf350ced38b73db9efa1ff8fd16a67a1cb35abb2dda50d89661b531f03fd3"},
{file = "coincurve-21.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:54d9500c56d5499375e579c3917472ffcf804c3584dd79052a79974280985c74"},
{file = "coincurve-21.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:773917f075ec4b94a7a742637d303a3a082616a115c36568eb6c873a8d950d18"},
{file = "coincurve-21.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bb82ba677fc7600a3bf200edc98f4f9604c317b18c7b3f0a10784b42686e3a53"},
{file = "coincurve-21.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5001de8324c35eee95f34e011a5c3b4e7d9ae9ca4a862a93b2c89b3f467f511b"},
{file = "coincurve-21.0.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4d0bb5340bcac695731bef51c3e0126f252453e2d1ae7fa1486d90eff978bf6"},
{file = "coincurve-21.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a9b49789ff86f3cf86cfc8ff8c6c43bac2607720ec638e8ba471fa7e8765bd2"},
{file = "coincurve-21.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b85b49e192d2ca1a906a7b978bacb55d4dcb297cc2900fbbd9b9180d50878779"},
{file = "coincurve-21.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad6445f0bb61b3a4404d87a857ddb2a74a642cd4d00810237641aab4d6b1a42f"},
{file = "coincurve-21.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d3f017f1491491f3f2c49e5d2d3a471a872d75117bfcb804d1167061c94bd347"},
{file = "coincurve-21.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:500e5e38cd4cbc4ea8a5c631ce843b1d52ef19ac41128568214d150f75f1f387"},
{file = "coincurve-21.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:ef81ca24511a808ad0ebdb8fdaf9c5c87f12f935b3d117acccc6520ad671bcce"},
{file = "coincurve-21.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:6ec8e859464116a3c90168cd2bd7439527d4b4b5e328b42e3c8e0475f9b0bf71"},
{file = "coincurve-21.0.0.tar.gz", hash = "sha256:8b37ce4265a82bebf0e796e21a769e56fdbf8420411ccbe3fafee4ed75b6a6e5"},
]
[[package]]
name = "colorama"
version = "0.4.6"
@@ -3930,6 +3990,30 @@ dev = ["coverage[toml] (==7.10.7)", "cryptography (>=3.4.0)", "pre-commit", "pyt
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==7.10.7)", "pytest (>=8.4.2,<9.0.0)"]
[[package]]
name = "pynostr"
version = "0.7.0"
description = "Python Library for nostr."
optional = false
python-versions = ">3.7.0"
groups = ["main"]
files = [
{file = "pynostr-0.7.0-py3-none-any.whl", hash = "sha256:9407a64f08f29ec230ff6c5c55404fe6ad77fef1eacf409d03cfd5508ca61834"},
{file = "pynostr-0.7.0.tar.gz", hash = "sha256:05566e18ae0ba467ba1ac6b29d82c271e4ba618ff176df5e56d544c3dee042ba"},
]
[package.dependencies]
coincurve = ">=1.8.0"
cryptography = ">=37.0.4"
requests = "*"
rich = "*"
tlv8 = "*"
tornado = "*"
typer = "*"
[package.extras]
websocket-client = ["websocket-client (>=1.3.3)"]
[[package]]
name = "pyobjc"
version = "12.1"
@@ -8016,10 +8100,9 @@ files = [
name = "requests"
version = "2.32.5"
description = "Python HTTP for Humans."
optional = true
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"voice\" or extra == \"research\""
files = [
{file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"},
{file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"},
@@ -8828,6 +8911,17 @@ docs = ["sphinx", "sphinx-autobuild", "sphinx-llms-txt-link", "sphinx-no-pragma"
lint = ["doc8", "mypy", "pydoclint", "ruff"]
test = ["coverage", "fake.py", "pytest", "pytest-codeblock", "pytest-cov", "pytest-ordering", "tox"]
[[package]]
name = "tlv8"
version = "0.10.0"
description = "Python module to handle type-length-value (TLV) encoded data 8-bit type, 8-bit length, and N-byte value as described within the Apple HomeKit Accessory Protocol Specification Non-Commercial Version Release R2."
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "tlv8-0.10.0.tar.gz", hash = "sha256:7930a590267b809952272ac2a27ee81b99ec5191fa2eba08050e0daee4262684"},
]
[[package]]
name = "tokenizers"
version = "0.22.2"
@@ -8934,6 +9028,26 @@ typing-extensions = ">=4.10.0"
opt-einsum = ["opt-einsum (>=3.3)"]
optree = ["optree (>=0.13.0)"]
[[package]]
name = "tornado"
version = "6.5.5"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "tornado-6.5.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:487dc9cc380e29f58c7ab88f9e27cdeef04b2140862e5076a66fb6bb68bb1bfa"},
{file = "tornado-6.5.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:65a7f1d46d4bb41df1ac99f5fcb685fb25c7e61613742d5108b010975a9a6521"},
{file = "tornado-6.5.5-cp39-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e74c92e8e65086b338fd56333fb9a68b9f6f2fe7ad532645a290a464bcf46be5"},
{file = "tornado-6.5.5-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:435319e9e340276428bbdb4e7fa732c2d399386d1de5686cb331ec8eee754f07"},
{file = "tornado-6.5.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3f54aa540bdbfee7b9eb268ead60e7d199de5021facd276819c193c0fb28ea4e"},
{file = "tornado-6.5.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:36abed1754faeb80fbd6e64db2758091e1320f6bba74a4cf8c09cd18ccce8aca"},
{file = "tornado-6.5.5-cp39-abi3-win32.whl", hash = "sha256:dd3eafaaeec1c7f2f8fdcd5f964e8907ad788fe8a5a32c4426fbbdda621223b7"},
{file = "tornado-6.5.5-cp39-abi3-win_amd64.whl", hash = "sha256:6443a794ba961a9f619b1ae926a2e900ac20c34483eea67be4ed8f1e58d3ef7b"},
{file = "tornado-6.5.5-cp39-abi3-win_arm64.whl", hash = "sha256:2c9a876e094109333f888539ddb2de4361743e5d21eece20688e3e351e4990a6"},
{file = "tornado-6.5.5.tar.gz", hash = "sha256:192b8f3ea91bd7f1f50c06955416ed76c6b72f96779b962f07f911b91e8d30e9"},
]
[[package]]
name = "tqdm"
version = "4.67.3"
@@ -9205,7 +9319,6 @@ files = [
{file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"},
{file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"},
]
markers = {main = "extra == \"voice\" or extra == \"research\" or extra == \"dev\""}
[package.dependencies]
pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""}
@@ -9720,4 +9833,4 @@ voice = ["openai-whisper", "piper-tts", "pyttsx3", "sounddevice"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<4"
content-hash = "5af3028474051032bef12182eaa5ef55950cbaeca21d1793f878d54c03994eb0"
content-hash = "bca84c65e590e038a4b8bbd582ce8efa041f678b3adad47139d13c04690c5940"

View File

@@ -63,6 +63,8 @@ pytest-randomly = { version = ">=3.16.0", optional = true }
pytest-xdist = { version = ">=3.5.0", optional = true }
anthropic = "^0.86.0"
opencv-python = "^4.13.0.92"
websockets = ">=12.0"
pynostr = "*"
[tool.poetry.extras]
telegram = ["python-telegram-bot"]

View File

@@ -5,7 +5,6 @@ Usage:
python scripts/add_pytest_markers.py
"""
import re
from pathlib import Path
@@ -93,7 +92,7 @@ def main():
print(f"⏭️ {rel_path:<50} (already marked)")
print(f"\n📊 Total files marked: {marked_count}")
print(f"\n✨ Pytest markers configured. Run 'pytest -m unit' to test specific categories.")
print("\n✨ Pytest markers configured. Run 'pytest -m unit' to test specific categories.")
if __name__ == "__main__":

View File

@@ -1,8 +1,7 @@
import os
def fix_l402_proxy():
path = "src/timmy_serve/l402_proxy.py"
with open(path, "r") as f:
with open(path) as f:
content = f.read()
# 1. Add hmac_secret to Macaroon dataclass
@@ -132,7 +131,7 @@ if _MACAROON_SECRET_RAW == _MACAROON_SECRET_DEFAULT or _HMAC_SECRET_RAW == _HMAC
def fix_xss():
# Fix chat_message.html
path = "src/dashboard/templates/partials/chat_message.html"
with open(path, "r") as f:
with open(path) as f:
content = f.read()
content = content.replace("{{ user_message }}", "{{ user_message | e }}")
content = content.replace("{{ response }}", "{{ response | e }}")
@@ -142,7 +141,7 @@ def fix_xss():
# Fix history.html
path = "src/dashboard/templates/partials/history.html"
with open(path, "r") as f:
with open(path) as f:
content = f.read()
content = content.replace("{{ msg.content }}", "{{ msg.content | e }}")
with open(path, "w") as f:
@@ -150,7 +149,7 @@ def fix_xss():
# Fix briefing.html
path = "src/dashboard/templates/briefing.html"
with open(path, "r") as f:
with open(path) as f:
content = f.read()
content = content.replace("{{ briefing.summary }}", "{{ briefing.summary | e }}")
with open(path, "w") as f:
@@ -158,7 +157,7 @@ def fix_xss():
# Fix approval_card_single.html
path = "src/dashboard/templates/partials/approval_card_single.html"
with open(path, "r") as f:
with open(path) as f:
content = f.read()
content = content.replace("{{ item.title }}", "{{ item.title | e }}")
content = content.replace("{{ item.description }}", "{{ item.description | e }}")
@@ -168,7 +167,7 @@ def fix_xss():
# Fix marketplace.html
path = "src/dashboard/templates/marketplace.html"
with open(path, "r") as f:
with open(path) as f:
content = f.read()
content = content.replace("{{ agent.name }}", "{{ agent.name | e }}")
content = content.replace("{{ agent.role }}", "{{ agent.role | e }}")

View File

@@ -8,8 +8,7 @@ from existing history so the LOOPSTAT panel isn't empty.
import json
import os
import re
import subprocess
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path
from urllib.request import Request, urlopen
@@ -227,7 +226,7 @@ def generate_summary(entries: list[dict]):
stats["avg_duration"] = round(stats["total_duration"] / stats["count"])
summary = {
"updated_at": datetime.now(timezone.utc).isoformat(),
"updated_at": datetime.now(UTC).isoformat(),
"window": len(recent),
"total_cycles": len(entries),
"success_rate": round(len(successes) / len(recent), 2) if recent else 0,

View File

@@ -17,7 +17,7 @@ import importlib.util
import json
import sys
import time
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path
import requests
@@ -216,7 +216,7 @@ def generate_markdown(all_results: dict, run_date: str) -> str:
lines.append(f"- **Result:** {bres.get('detail', bres.get('error', 'n/a'))}")
snippet = bres.get("code_snippet", "")
if snippet:
lines.append(f"- **Generated code snippet:**")
lines.append("- **Generated code snippet:**")
lines.append(" ```python")
for ln in snippet.splitlines()[:8]:
lines.append(f" {ln}")
@@ -287,7 +287,7 @@ def parse_args() -> argparse.Namespace:
def main() -> int:
args = parse_args()
run_date = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
run_date = datetime.now(UTC).strftime("%Y-%m-%d %H:%M UTC")
print(f"Model Benchmark Suite — {run_date}")
print(f"Testing {len(args.models)} model(s): {', '.join(args.models)}")

View File

@@ -46,8 +46,7 @@ import argparse
import json
import re
import subprocess
import sys
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent
@@ -91,7 +90,7 @@ def _epoch_tag(now: datetime | None = None) -> tuple[str, dict]:
When the date rolls over, the counter resets to 1.
"""
if now is None:
now = datetime.now(timezone.utc)
now = datetime.now(UTC)
iso_cal = now.isocalendar() # (year, week, weekday)
week = iso_cal[1]
@@ -221,7 +220,7 @@ def update_summary() -> None:
for k, v in sorted(by_weekday.items())}
summary = {
"updated_at": datetime.now(timezone.utc).isoformat(),
"updated_at": datetime.now(UTC).isoformat(),
"current_epoch": current_epoch,
"window": len(recent),
"measured_cycles": len(measured),
@@ -293,7 +292,7 @@ def main() -> None:
truly_success = args.success and args.main_green
# Generate epoch turnover tag
now = datetime.now(timezone.utc)
now = datetime.now(UTC)
epoch_tag, epoch_parts = _epoch_tag(now)
entry = {

View File

@@ -11,7 +11,6 @@ Usage: python scripts/dev_server.py [--port PORT]
"""
import argparse
import datetime
import os
import socket
import subprocess
@@ -81,8 +80,8 @@ def _ollama_url() -> str:
def _smoke_ollama(url: str) -> str:
"""Quick connectivity check against Ollama."""
import urllib.request
import urllib.error
import urllib.request
try:
req = urllib.request.Request(url, method="GET")
@@ -101,14 +100,14 @@ def _print_banner(port: int) -> None:
hr = "" * 62
print(flush=True)
print(f" {hr}")
print(f" ┃ Timmy Time — Development Server")
print(" ┃ Timmy Time — Development Server")
print(f" {hr}")
print()
print(f" Dashboard: http://localhost:{port}")
print(f" API docs: http://localhost:{port}/docs")
print(f" Health: http://localhost:{port}/health")
print()
print(f" ── Status ──────────────────────────────────────────────")
print(" ── Status ──────────────────────────────────────────────")
print(f" Backend: {ollama_url} [{ollama_status}]")
print(f" Version: {version}")
print(f" Git commit: {git}")

View File

@@ -319,9 +319,9 @@ def main(argv: list[str] | None = None) -> int:
print(f"Exported {count} training examples to: {args.output}")
print()
print("Next steps:")
print(f" mkdir -p ~/timmy-lora-training")
print(" mkdir -p ~/timmy-lora-training")
print(f" cp {args.output} ~/timmy-lora-training/train.jsonl")
print(f" python scripts/lora_finetune.py --data ~/timmy-lora-training")
print(" python scripts/lora_finetune.py --data ~/timmy-lora-training")
else:
print("No training examples exported.")
return 1

View File

@@ -18,9 +18,8 @@ Called by: deep_triage.sh (before the LLM triage), timmy-loop.sh (every 50 cycle
from __future__ import annotations
import json
import sys
from collections import defaultdict
from datetime import datetime, timezone, timedelta
from datetime import UTC, datetime, timedelta
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent
@@ -52,7 +51,7 @@ def parse_ts(ts_str: str) -> datetime | None:
try:
dt = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
dt = dt.replace(tzinfo=UTC)
return dt
except (ValueError, TypeError):
return None
@@ -60,7 +59,7 @@ def parse_ts(ts_str: str) -> datetime | None:
def window(entries: list[dict], days: int) -> list[dict]:
"""Filter entries to the last N days."""
cutoff = datetime.now(timezone.utc) - timedelta(days=days)
cutoff = datetime.now(UTC) - timedelta(days=days)
result = []
for e in entries:
ts = parse_ts(e.get("timestamp", ""))
@@ -344,7 +343,7 @@ def main() -> None:
recommendations = generate_recommendations(trends, types, repeats, outliers, triage_eff)
insights = {
"generated_at": datetime.now(timezone.utc).isoformat(),
"generated_at": datetime.now(UTC).isoformat(),
"total_cycles_analyzed": len(cycles),
"trends": trends,
"by_type": types,
@@ -371,7 +370,7 @@ def main() -> None:
header += f" · current epoch: {latest_epoch}"
print(header)
print(f"\n TRENDS (7d vs previous 7d):")
print("\n TRENDS (7d vs previous 7d):")
r7 = trends["recent_7d"]
p7 = trends["previous_7d"]
print(f" Cycles: {r7['count']:>3d} (was {p7['count']})")
@@ -383,14 +382,14 @@ def main() -> None:
print(f" PRs merged: {r7['prs_merged']:>3d} (was {p7['prs_merged']})")
print(f" Lines net: {r7['lines_net']:>+5d}")
print(f"\n BY TYPE:")
print("\n BY TYPE:")
for t, info in sorted(types.items(), key=lambda x: -x[1]["count"]):
print(f" {t:12s} n={info['count']:>2d} "
f"ok={info['success_rate']*100:>3.0f}% "
f"avg={info['avg_duration']//60}m{info['avg_duration']%60:02d}s")
if repeats:
print(f"\n REPEAT FAILURES:")
print("\n REPEAT FAILURES:")
for rf in repeats[:3]:
print(f" #{rf['issue']} failed {rf['failure_count']}x")

View File

@@ -360,7 +360,7 @@ def main(argv: list[str] | None = None) -> int:
return rc
# Default: train
print(f"Starting LoRA fine-tuning")
print("Starting LoRA fine-tuning")
print(f" Model: {model_path}")
print(f" Data: {args.data}")
print(f" Adapter path: {args.adapter_path}")

View File

@@ -9,11 +9,10 @@ This script runs before commits to catch issues early:
- Syntax errors in test files
"""
import sys
import subprocess
from pathlib import Path
import ast
import re
import subprocess
import sys
from pathlib import Path
def check_imports():
@@ -70,7 +69,7 @@ def check_test_syntax():
for test_file in tests_dir.rglob("test_*.py"):
try:
with open(test_file, "r") as f:
with open(test_file) as f:
ast.parse(f.read())
print(f"{test_file.relative_to(tests_dir.parent)} has valid syntax")
except SyntaxError as e:
@@ -86,7 +85,7 @@ def check_platform_specific_tests():
# Check for hardcoded /Users/ paths in tests
tests_dir = Path("tests").resolve()
for test_file in tests_dir.rglob("test_*.py"):
with open(test_file, "r") as f:
with open(test_file) as f:
content = f.read()
if 'startswith("/Users/")' in content:
issues.append(
@@ -110,7 +109,7 @@ def check_docker_availability():
if docker_test_files:
for test_file in docker_test_files:
with open(test_file, "r") as f:
with open(test_file) as f:
content = f.read()
has_skipif = "@pytest.mark.skipif" in content or "pytestmark = pytest.mark.skipif" in content
if not has_skipif and "docker" in content.lower():

View File

@@ -83,8 +83,8 @@ def test_tcp_connection(host: str, port: int, timeout: float) -> tuple[bool, soc
return True, sock
except OSError as exc:
print(f" ✗ Connection failed: {exc}")
print(f" Checklist:")
print(f" - Is Bannerlord running with GABS mod enabled?")
print(" Checklist:")
print(" - Is Bannerlord running with GABS mod enabled?")
print(f" - Is port {port} open in Windows Firewall?")
print(f" - Is the VM IP correct? (got: {host})")
return False, None
@@ -92,7 +92,7 @@ def test_tcp_connection(host: str, port: int, timeout: float) -> tuple[bool, soc
def test_ping(sock: socket.socket) -> bool:
"""PASS: JSON-RPC ping returns a 2.0 response."""
print(f"\n[2/4] JSON-RPC ping")
print("\n[2/4] JSON-RPC ping")
try:
t0 = time.monotonic()
resp = _rpc(sock, "ping", req_id=1)
@@ -109,7 +109,7 @@ def test_ping(sock: socket.socket) -> bool:
def test_game_state(sock: socket.socket) -> bool:
"""PASS: get_game_state returns a result (game must be in a campaign)."""
print(f"\n[3/4] get_game_state call")
print("\n[3/4] get_game_state call")
try:
t0 = time.monotonic()
resp = _rpc(sock, "get_game_state", req_id=2)
@@ -120,7 +120,7 @@ def test_game_state(sock: socket.socket) -> bool:
if code == -32601:
# Method not found — GABS version may not expose this method
print(f" ~ Method not available ({elapsed_ms:.1f} ms): {msg}")
print(f" This is acceptable if game is not yet in a campaign.")
print(" This is acceptable if game is not yet in a campaign.")
return True
print(f" ✗ RPC error ({elapsed_ms:.1f} ms) [{code}]: {msg}")
return False
@@ -191,7 +191,7 @@ def main() -> int:
args = parser.parse_args()
print("=" * 60)
print(f"GABS Connectivity Test Suite")
print("GABS Connectivity Test Suite")
print(f"Target: {args.host}:{args.port}")
print(f"Timeout: {args.timeout}s")
print("=" * 60)

View File

@@ -150,7 +150,7 @@ def test_model_available(model: str) -> bool:
def test_basic_response(model: str) -> bool:
"""PASS: model responds coherently to a simple prompt."""
print(f"\n[2/5] Basic response test")
print("\n[2/5] Basic response test")
messages = [
{"role": "user", "content": "Reply with exactly: HERMES_OK"},
]
@@ -188,7 +188,7 @@ def test_memory_usage() -> bool:
def test_tool_calling(model: str) -> bool:
"""PASS: model produces a tool_calls response (not raw text) for a tool-use prompt."""
print(f"\n[4/5] Tool-calling test")
print("\n[4/5] Tool-calling test")
messages = [
{
"role": "user",
@@ -236,7 +236,7 @@ def test_tool_calling(model: str) -> bool:
def test_timmy_persona(model: str) -> bool:
"""PASS: model accepts a Timmy persona system prompt and responds in-character."""
print(f"\n[5/5] Timmy-persona smoke test")
print("\n[5/5] Timmy-persona smoke test")
messages = [
{
"role": "system",

View File

@@ -26,7 +26,7 @@ import argparse
import json
import sys
import time
from dataclasses import dataclass, field
from dataclasses import dataclass
from typing import Any
try:

View File

@@ -16,7 +16,7 @@ import json
import os
import re
import sys
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path
# ── Config ──────────────────────────────────────────────────────────────
@@ -277,7 +277,7 @@ def update_quarantine(scored: list[dict]) -> list[dict]:
"""Auto-quarantine issues that have failed >= 2 times. Returns filtered list."""
failures = load_cycle_failures()
quarantine = load_quarantine()
now = datetime.now(timezone.utc).isoformat()
now = datetime.now(UTC).isoformat()
filtered = []
for item in scored:
@@ -366,7 +366,7 @@ def run_triage() -> list[dict]:
backup_data = QUEUE_BACKUP_FILE.read_text()
json.loads(backup_data) # Validate backup
QUEUE_FILE.write_text(backup_data)
print(f"[triage] Restored queue.json from backup")
print("[triage] Restored queue.json from backup")
except (json.JSONDecodeError, OSError) as restore_exc:
print(f"[triage] ERROR: Backup restore failed: {restore_exc}", file=sys.stderr)
# Write empty list as last resort
@@ -377,7 +377,7 @@ def run_triage() -> list[dict]:
# Write retro entry
retro_entry = {
"timestamp": datetime.now(timezone.utc).isoformat(),
"timestamp": datetime.now(UTC).isoformat(),
"total_open": len(all_issues),
"scored": len(scored),
"ready": len(ready),

View File

@@ -35,9 +35,9 @@ from dashboard.routes.chat_api_v1 import router as chat_api_v1_router
from dashboard.routes.daily_run import router as daily_run_router
from dashboard.routes.db_explorer import router as db_explorer_router
from dashboard.routes.discord import router as discord_router
from dashboard.routes.energy import router as energy_router
from dashboard.routes.experiments import router as experiments_router
from dashboard.routes.grok import router as grok_router
from dashboard.routes.energy import router as energy_router
from dashboard.routes.health import router as health_router
from dashboard.routes.hermes import router as hermes_router
from dashboard.routes.loop_qa import router as loop_qa_router
@@ -48,6 +48,7 @@ from dashboard.routes.models import router as models_router
from dashboard.routes.nexus import router as nexus_router
from dashboard.routes.quests import router as quests_router
from dashboard.routes.scorecards import router as scorecards_router
from dashboard.routes.self_correction import router as self_correction_router
from dashboard.routes.sovereignty_metrics import router as sovereignty_metrics_router
from dashboard.routes.sovereignty_ws import router as sovereignty_ws_router
from dashboard.routes.spark import router as spark_router
@@ -55,7 +56,6 @@ from dashboard.routes.system import router as system_router
from dashboard.routes.tasks import router as tasks_router
from dashboard.routes.telegram import router as telegram_router
from dashboard.routes.thinking import router as thinking_router
from dashboard.routes.self_correction import router as self_correction_router
from dashboard.routes.three_strike import router as three_strike_router
from dashboard.routes.tools import router as tools_router
from dashboard.routes.tower import router as tower_router

View File

View File

@@ -0,0 +1,154 @@
# TODO: This code should be moved to the timmy-nostr repository once it's available.
# See ADR-024 for more details.
import json
import logging
from typing import Any
import websockets
from pynostr.event import Event
from pynostr.key import PrivateKey
logger = logging.getLogger(__name__)
class NostrClient:
    """A minimal client for interacting with the Nostr network.

    Manages websocket connections to a set of relays and implements the
    subset of the protocol needed here: NIP-89 capability-card discovery
    (kind 31990) and NIP-90 job requests (kinds 5000-5999).
    """

    def __init__(self, relays: list[str], private_key_hex: str | None = None):
        """Initialise the client.

        Args:
            relays: Websocket URLs of the relays to talk to (``wss://...``).
            private_key_hex: Optional hex-encoded private key. Only needed
                for operations that sign events (e.g. job requests).
        """
        self.relays = relays
        # Maps relay URL -> open websocket connection.
        self._connections: dict[str, websockets.WebSocketClientProtocol] = {}
        if private_key_hex:
            self.private_key = PrivateKey.from_hex(private_key_hex)
            self.public_key = self.private_key.public_key
        else:
            self.private_key = None
            self.public_key = None

    async def connect(self):
        """Connect to all configured relays.

        Failures are logged and skipped so a single unreachable relay does
        not prevent the remaining relays from connecting.
        """
        for relay in self.relays:
            try:
                conn = await websockets.connect(relay)
                self._connections[relay] = conn
                logger.info("Connected to Nostr relay: %s", relay)
            except Exception as e:
                logger.error("Failed to connect to Nostr relay %s: %s", relay, e)

    async def disconnect(self):
        """Close every open relay connection and forget all of them."""
        for relay, conn in self._connections.items():
            try:
                await conn.close()
                logger.info("Disconnected from Nostr relay: %s", relay)
            except Exception as e:
                logger.error("Failed to disconnect from Nostr relay %s: %s", relay, e)
        self._connections = {}

    async def subscribe_for_events(
        self,
        subscription_id: str,
        filters: list[dict[str, Any]],
        unsubscribe_on_eose: bool = True,
    ):
        """Subscribe to events matching *filters* and yield them.

        Relays are consumed sequentially: each connected relay is drained
        until EOSE (or an error) before moving on to the next one.

        Args:
            subscription_id: Client-chosen subscription identifier.
            filters: NIP-01 filter objects attached to the REQ message.
            unsubscribe_on_eose: When True, send CLOSE and stop reading a
                relay once it signals end-of-stored-events.

        Yields:
            Raw event dicts exactly as received from the relays.
        """
        for relay, conn in self._connections.items():
            try:
                request = ["REQ", subscription_id]
                request.extend(filters)
                await conn.send(json.dumps(request))
                logger.info("Subscribed to events on %s with sub_id: %s", relay, subscription_id)
                async for message in conn:
                    message_json = json.loads(message)
                    message_type = message_json[0]
                    if message_type == "EVENT":
                        # ["EVENT", <sub_id>, <event>] -- yield the payload.
                        yield message_json[2]
                    elif message_type == "EOSE":
                        logger.info("End of stored events for sub_id: %s on %s", subscription_id, relay)
                        if unsubscribe_on_eose:
                            await self.unsubscribe(subscription_id, relay)
                            break
            except Exception as e:
                logger.error("Failed to subscribe to events on %s: %s", relay, e)

    async def unsubscribe(self, subscription_id: str, relay: str):
        """Send a CLOSE message for *subscription_id* to one relay."""
        if relay not in self._connections:
            logger.warning("Not connected to relay: %s", relay)
            return
        conn = self._connections[relay]
        try:
            request = ["CLOSE", subscription_id]
            await conn.send(json.dumps(request))
            logger.info("Unsubscribed from sub_id: %s on %s", subscription_id, relay)
        except Exception as e:
            logger.error("Failed to unsubscribe from %s: %s", relay, e)

    async def publish_event(self, event: Event):
        """Publish *event* to every connected relay (best effort)."""
        for relay, conn in self._connections.items():
            try:
                request = ["EVENT", event.to_dict()]
                await conn.send(json.dumps(request))
                logger.info("Published event %s to %s", event.id, relay)
            except Exception as e:
                logger.error("Failed to publish event to %s: %s", relay, e)

    # NIP-89 Implementation
    async def find_capability_cards(self, kinds: list[int] | None = None):
        """Yield capability-card events (Kind 31990) published by agents.

        Args:
            kinds: Optional job kinds; when given, only cards advertising a
                matching ``k`` tag are requested from the relays.
        """
        # Kind 31990 is the NIP-89 "handler information" kind, used here as
        # the agent capability card.
        filters: list[dict[str, Any]] = [{"kinds": [31990]}]
        if kinds:
            filters[0]["#k"] = [str(k) for k in kinds]
        sub_id = "capability-card-finder"
        async for event in self.subscribe_for_events(sub_id, filters):
            yield event

    # NIP-90 Implementation
    async def create_job_request(
        self,
        kind: int,
        content: str,
        tags: list[list[str]] | None = None,
    ) -> Event:
        """Create, sign, and publish a job request (Kind 5000-5999).

        Args:
            kind: Job-request kind; must lie in the NIP-90 5000-5999 range.
            content: Job-request content string.
            tags: Optional event tags.

        Returns:
            The signed event that was published.

        Raises:
            RuntimeError: If the client was created without a private key.
            ValueError: If *kind* is outside the job-request range.
        """
        if not self.private_key:
            # Narrowed from a bare Exception: callers catching Exception
            # still work, but unrelated errors are no longer conflated.
            raise RuntimeError("Cannot create job request without a private key.")
        if not 5000 <= kind <= 5999:
            raise ValueError("Job request kind must be between 5000 and 5999.")
        event = Event(
            pubkey=self.public_key.hex(),
            kind=kind,
            content=content,
            tags=tags or [],
        )
        event.sign(self.private_key.hex())
        await self.publish_event(event)
        return event

View File

@@ -19,7 +19,6 @@ Refs: #1009
"""
import asyncio
import json
import logging
import subprocess
import time

View File

@@ -24,8 +24,8 @@ from infrastructure.models.registry import (
model_registry,
)
from infrastructure.models.router import (
TierLabel,
TieredModelRouter,
TierLabel,
classify_tier,
get_tiered_router,
)

View File

@@ -27,7 +27,6 @@ References:
- Issue #882 — Model Tiering Router: Local 8B / Hermes 70B / Cloud API Cascade
"""
import asyncio
import logging
import re
import time

View File

@@ -20,13 +20,11 @@ Usage::
from __future__ import annotations
import json
import logging
import sqlite3
import uuid
from collections.abc import Generator
from contextlib import closing, contextmanager
from datetime import UTC, datetime
from pathlib import Path
logger = logging.getLogger(__name__)

View File

@@ -28,6 +28,9 @@ KIMI_READY_LABEL = "kimi-ready"
# Label colour for the kimi-ready label (dark teal)
KIMI_LABEL_COLOR = "#006b75"
# Maximum number of concurrent active (open) Kimi-delegated issues
KIMI_MAX_ACTIVE_ISSUES = 3
# Keywords that suggest a task exceeds local capacity
_HEAVY_RESEARCH_KEYWORDS = frozenset(
{
@@ -176,6 +179,38 @@ async def _get_or_create_label(
return None
async def _count_active_kimi_issues(
    client: Any,
    base_url: str,
    headers: dict[str, str],
    repo: str,
) -> int:
    """Return how many open issues currently carry the `kimi-ready` label.

    Args:
        client: httpx.AsyncClient instance.
        base_url: Gitea API base URL.
        headers: Auth headers.
        repo: owner/repo string.

    Returns:
        The count of open kimi-ready issues. Any error (transport failure
        or unexpected status) yields 0 so delegation fails open rather
        than being blocked when Gitea is unreachable.
    """
    query = {
        "state": "open",
        "type": "issues",
        "labels": KIMI_READY_LABEL,
        "limit": 50,
    }
    try:
        resp = await client.get(
            f"{base_url}/repos/{repo}/issues",
            headers=headers,
            params=query,
        )
        if resp.status_code != 200:
            logger.warning("count_active_kimi_issues: unexpected status %s", resp.status_code)
            return 0
        return len(resp.json())
    except Exception as exc:
        logger.warning("count_active_kimi_issues failed: %s", exc)
        return 0
async def create_kimi_research_issue(
task: str,
context: str,
@@ -217,6 +252,22 @@ async def create_kimi_research_issue(
async with httpx.AsyncClient(timeout=15) as client:
label_id = await _get_or_create_label(client, base_url, headers, repo)
active_count = await _count_active_kimi_issues(client, base_url, headers, repo)
if active_count >= KIMI_MAX_ACTIVE_ISSUES:
logger.warning(
"Kimi delegation cap reached (%d/%d active) — skipping: %s",
active_count,
KIMI_MAX_ACTIVE_ISSUES,
task[:60],
)
return {
"success": False,
"error": (
f"Kimi delegation cap reached: {active_count} active issues "
f"(max {KIMI_MAX_ACTIVE_ISSUES}). Resolve existing issues first."
),
}
body = _build_research_template(task, context, question, priority)
issue_payload: dict[str, Any] = {"title": task, "body": body}
if label_id is not None:

View File

@@ -21,7 +21,6 @@ import base64
import json
import logging
from datetime import UTC, datetime
from pathlib import Path
from typing import Any
import httpx

View File

@@ -22,21 +22,20 @@ import sqlite3
from datetime import datetime
from pathlib import Path
from timmy.thinking._db import Thought, _get_conn
from timmy.thinking.engine import ThinkingEngine
from timmy.thinking.seeds import (
SEED_TYPES,
_SENSITIVE_PATTERNS,
_META_OBSERVATION_PHRASES,
_THINK_TAG_RE,
_THINKING_PROMPT,
)
# Re-export HOT_MEMORY_PATH and SOUL_PATH so existing patch targets continue to work.
# Tests that patch "timmy.thinking.HOT_MEMORY_PATH" or "timmy.thinking.SOUL_PATH"
# should instead patch "timmy.thinking._snapshot.HOT_MEMORY_PATH" etc., but these
# re-exports are kept for any code that reads them from the top-level namespace.
from timmy.memory_system import HOT_MEMORY_PATH, SOUL_PATH # noqa: F401
from timmy.thinking._db import Thought, _get_conn
from timmy.thinking.engine import ThinkingEngine
from timmy.thinking.seeds import (
_META_OBSERVATION_PHRASES,
_SENSITIVE_PATTERNS,
_THINK_TAG_RE,
_THINKING_PROMPT,
SEED_TYPES,
)
logger = logging.getLogger(__name__)

View File

@@ -4,7 +4,6 @@ import logging
from pathlib import Path
from config import settings
from timmy.thinking.seeds import _META_OBSERVATION_PHRASES, _SENSITIVE_PATTERNS
logger = logging.getLogger(__name__)

View File

@@ -5,11 +5,11 @@ import random
from datetime import UTC, datetime
from timmy.thinking.seeds import (
SEED_TYPES,
_CREATIVE_SEEDS,
_EXISTENTIAL_SEEDS,
_OBSERVATION_SEEDS,
_SOVEREIGNTY_SEEDS,
SEED_TYPES,
)
logger = logging.getLogger(__name__)

View File

@@ -1,7 +1,7 @@
"""System snapshot and memory context mixin for the thinking engine."""
import logging
from datetime import UTC, datetime
from datetime import datetime
from timmy.memory_system import HOT_MEMORY_PATH, SOUL_PATH

View File

@@ -7,8 +7,7 @@ from difflib import SequenceMatcher
from pathlib import Path
from config import settings
from timmy.thinking._db import Thought, _DEFAULT_DB, _get_conn, _row_to_thought
from timmy.thinking._db import _DEFAULT_DB, Thought, _get_conn, _row_to_thought
from timmy.thinking._distillation import _DistillationMixin
from timmy.thinking._issue_filing import _IssueFilingMixin
from timmy.thinking._seeds_mixin import _SeedsMixin

View File

@@ -0,0 +1,93 @@
import json
import pytest
import websockets
from pynostr.key import PrivateKey
from src.infrastructure.clients.nostr_client import NostrClient
@pytest.mark.asyncio
async def test_nostr_client_connect_disconnect():
    """Connect to a relay and verify connection bookkeeping, then disconnect."""
    # NOTE(review): this hits a real public relay, so the test depends on
    # network access and third-party uptime -- consider stubbing the
    # websocket layer instead.
    # Using a public mock relay for testing
    relays = ["wss://relay.damus.io"]
    client = NostrClient(relays)
    await client.connect()
    # One entry per configured relay is expected after connect().
    assert len(client._connections) == 1
    for relay in relays:
        assert relay in client._connections
        # OPEN means the websocket handshake completed successfully.
        assert client._connections[relay].state == websockets.protocol.State.OPEN
    await client.disconnect()
    # disconnect() must drop every stored connection.
    assert len(client._connections) == 0
@pytest.mark.asyncio
async def test_find_capability_cards():
    """find_capability_cards() yields kind-31990 events from the subscription."""
    # NOTE(review): connect()/disconnect() still touch a live relay even
    # though the subscription itself is stubbed below -- the network calls
    # appear unnecessary for what this test asserts.
    relays = ["wss://relay.damus.io"]
    client = NostrClient(relays)
    await client.connect()
    # Create a dummy capability card event
    # In a real scenario, this would be published by another agent
    dummy_event = {
        "id": "faked_id",
        "pubkey": "faked_pubkey",
        "created_at": 1678886400,
        "kind": 31990,
        "tags": [
            ["d", "test-platform"],
            ["k", "5000"]
        ],
        "content": json.dumps({
            "name": "Test Agent",
            "about": "An agent for testing purposes"
        }),
        "sig": "faked_sig"
    }

    # Single-shot async generator standing in for a relay event stream.
    async def event_generator():
        yield dummy_event

    # Mock the subscribe_for_events method to return the dummy event
    # (patched on the instance, so find_capability_cards picks it up).
    async def mock_subscribe_for_events(subscription_id, filters, unsubscribe_on_eose=True):
        async for event in event_generator():
            yield event

    client.subscribe_for_events = mock_subscribe_for_events
    # Every yielded card must be the NIP-89 handler-information kind.
    async for event in client.find_capability_cards():
        assert event["kind"] == 31990
    await client.disconnect()
@pytest.mark.asyncio
async def test_create_job_request():
    """create_job_request() builds, signs, and publishes a NIP-90 event."""
    # NOTE(review): connect()/disconnect() reach a live relay although
    # publishing is stubbed -- the network round-trip looks avoidable.
    private_key = PrivateKey()
    relays = ["wss://relay.damus.io"]
    client = NostrClient(relays, private_key.hex())
    await client.connect()

    # Mock the publish_event method so nothing is sent to real relays;
    # capture what would have been published instead.
    published_events = []

    async def mock_publish_event(event):
        published_events.append(event)

    client.publish_event = mock_publish_event

    kind = 5001
    content = "Test job request"
    tags = [["d", "test-job"]]
    event = await client.create_job_request(kind, content, tags)

    # The returned event must carry the requested payload, be attributed
    # to our key, and have a valid signature.
    assert event.kind == kind
    assert event.content == content
    assert event.tags == tags
    assert event.pubkey == private_key.public_key.hex()
    assert event.verify()
    # Exactly one publish attempt, with the same event we got back.
    assert len(published_events) == 1
    assert published_events[0] == event
    await client.disconnect()

View File

@@ -27,7 +27,6 @@ from infrastructure.router.cascade import (
ProviderStatus,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

View File

@@ -10,13 +10,13 @@ Covers:
- "Plan the optimal path to become Hortator" → LOCAL_HEAVY
"""
from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import AsyncMock, MagicMock
import pytest
from infrastructure.models.router import (
TierLabel,
TieredModelRouter,
TierLabel,
_is_low_quality,
classify_tier,
get_tiered_router,

View File

@@ -5,7 +5,6 @@ from __future__ import annotations
from datetime import UTC, datetime, timedelta
from unittest.mock import AsyncMock, MagicMock, patch
import httpx
import pytest
from timmy.backlog_triage import (
@@ -28,7 +27,6 @@ from timmy.backlog_triage import (
score_issue,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

View File

@@ -4,7 +4,6 @@ from unittest.mock import AsyncMock, MagicMock, patch
import pytest
# ---------------------------------------------------------------------------
# exceeds_local_capacity
# ---------------------------------------------------------------------------

View File

@@ -34,7 +34,6 @@ from timmy.quest_system import (
update_quest_progress,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

View File

@@ -15,7 +15,6 @@ if "serpapi" not in sys.modules:
from timmy.research_tools import get_llm_client, google_web_search # noqa: E402
# ---------------------------------------------------------------------------
# google_web_search
# ---------------------------------------------------------------------------

View File

@@ -6,8 +6,7 @@ Refs: #957 (Session Sovereignty Report Generator)
import base64
import json
import time
from datetime import UTC, datetime
from pathlib import Path
from datetime import UTC
from unittest.mock import MagicMock, patch
import pytest
@@ -18,14 +17,12 @@ from timmy.sovereignty.session_report import (
_format_duration,
_gather_session_data,
_gather_sovereignty_data,
_render_markdown,
commit_report,
generate_and_commit_report,
generate_report,
mark_session_start,
)
# ---------------------------------------------------------------------------
# _format_duration
# ---------------------------------------------------------------------------

View File

@@ -7,11 +7,8 @@ from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
from timmy.tools.search import _extract_crawl_content, scrape_url, web_search
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

View File

@@ -12,9 +12,7 @@ import argparse
import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from unittest.mock import patch
# Add timmy_automations to path for imports
_TA_PATH = Path(__file__).resolve().parent.parent.parent / "timmy_automations" / "daily_run"

View File

@@ -7,7 +7,6 @@ falls back to the Ollama backend without crashing.
Refs #1284
"""
import sys
from unittest.mock import MagicMock, patch
import pytest

View File

@@ -11,11 +11,9 @@ from unittest.mock import MagicMock, patch
import pytest
from infrastructure.energy.monitor import (
_DEFAULT_MODEL_SIZE_GB,
EnergyBudgetMonitor,
InferenceSample,
_DEFAULT_MODEL_SIZE_GB,
_EFFICIENCY_SCORE_CEILING,
_WATTS_PER_GB_HEURISTIC,
)

View File

@@ -6,8 +6,10 @@ import pytest
from timmy.kimi_delegation import (
KIMI_LABEL_COLOR,
KIMI_MAX_ACTIVE_ISSUES,
KIMI_READY_LABEL,
_build_research_template,
_count_active_kimi_issues,
_extract_action_items,
_slugify,
delegate_research_to_kimi,
@@ -458,3 +460,197 @@ class TestExtractAndCreateFollowups:
assert result["success"] is True
assert 200 in result["created"]
# ── KIMI_MAX_ACTIVE_ISSUES constant ───────────────────────────────────────────
def test_kimi_max_active_issues_value():
    """The concurrent Kimi delegation cap is pinned at three open issues."""
    expected_cap = 3
    assert KIMI_MAX_ACTIVE_ISSUES == expected_cap
# ── _count_active_kimi_issues ─────────────────────────────────────────────────
class TestCountActiveKimiIssues:
    """Behavioural tests for ``_count_active_kimi_issues``."""

    @staticmethod
    def _client_with_response(status_code, payload):
        """Build an AsyncMock httpx-style client whose GET yields a canned response."""
        response = MagicMock()
        response.status_code = status_code
        response.json.return_value = payload
        client = AsyncMock()
        client.get.return_value = response
        return client

    @pytest.mark.asyncio
    async def test_returns_count_from_api(self):
        """A 200 response yields the number of issues in the payload."""
        client = self._client_with_response(200, [{"number": 1}, {"number": 2}])
        result = await _count_active_kimi_issues(
            client, "http://gitea.local/api/v1", {}, "owner/repo"
        )
        assert result == 2

    @pytest.mark.asyncio
    async def test_returns_zero_on_api_error(self):
        """A non-200 status fails open and reports zero active issues."""
        client = self._client_with_response(500, None)
        result = await _count_active_kimi_issues(
            client, "http://gitea.local/api/v1", {}, "owner/repo"
        )
        assert result == 0

    @pytest.mark.asyncio
    async def test_returns_zero_on_exception(self):
        """A transport error fails open and reports zero active issues."""
        client = AsyncMock()
        client.get.side_effect = Exception("network error")
        result = await _count_active_kimi_issues(
            client, "http://gitea.local/api/v1", {}, "owner/repo"
        )
        assert result == 0

    @pytest.mark.asyncio
    async def test_queries_open_issues_with_kimi_label(self):
        """The GET call filters on open state and the kimi-ready label."""
        client = self._client_with_response(200, [])
        await _count_active_kimi_issues(
            client, "http://gitea.local/api/v1", {}, "owner/repo"
        )
        params = client.get.call_args.kwargs["params"]
        assert params["state"] == "open"
        assert params["labels"] == KIMI_READY_LABEL
# ── Cap enforcement in create_kimi_research_issue ─────────────────────────────
class TestKimiCapEnforcement:
    """Tests for the 3-issue cap inside ``create_kimi_research_issue``."""

    def _make_settings(self):
        # Settings mock with Gitea fully configured so the delegation path
        # reaches the HTTP client instead of bailing out early.
        mock_settings = MagicMock()
        mock_settings.gitea_enabled = True
        mock_settings.gitea_token = "fake-token"
        mock_settings.gitea_url = "http://gitea.local"
        mock_settings.gitea_repo = "owner/repo"
        return mock_settings

    def _make_async_client(self, label_json, issue_count):
        # side_effect ordering matters: the first GET returns the label
        # list, the second returns the open kimi-ready issues used for
        # the cap check.
        label_resp = MagicMock()
        label_resp.status_code = 200
        label_resp.json.return_value = label_json
        count_resp = MagicMock()
        count_resp.status_code = 200
        count_resp.json.return_value = [{"number": i} for i in range(issue_count)]
        mock_client = AsyncMock()
        mock_client.get.side_effect = [label_resp, count_resp]
        # Async context manager wrapper standing in for httpx.AsyncClient().
        async_ctx = AsyncMock()
        async_ctx.__aenter__.return_value = mock_client
        async_ctx.__aexit__.return_value = False
        return async_ctx

    @pytest.mark.asyncio
    async def test_cap_reached_returns_failure(self):
        """Exactly at the cap (3 active) delegation is refused with a clear error."""
        from timmy.kimi_delegation import create_kimi_research_issue
        async_ctx = self._make_async_client(
            [{"name": "kimi-ready", "id": 7}], issue_count=3
        )
        with (
            patch("config.settings", self._make_settings()),
            patch("httpx.AsyncClient", return_value=async_ctx),
        ):
            result = await create_kimi_research_issue("Task", "ctx", "Q?")
        assert result["success"] is False
        # The error message should mention the cap and the active count.
        assert "cap" in result["error"].lower()
        assert "3" in result["error"]

    @pytest.mark.asyncio
    async def test_cap_exceeded_returns_failure(self):
        """Above the cap (5 active) delegation is also refused."""
        from timmy.kimi_delegation import create_kimi_research_issue
        async_ctx = self._make_async_client(
            [{"name": "kimi-ready", "id": 7}], issue_count=5
        )
        with (
            patch("config.settings", self._make_settings()),
            patch("httpx.AsyncClient", return_value=async_ctx),
        ):
            result = await create_kimi_research_issue("Task", "ctx", "Q?")
        assert result["success"] is False

    @pytest.mark.asyncio
    async def test_below_cap_proceeds_to_create(self):
        """Below the cap the issue is created and its metadata returned."""
        from timmy.kimi_delegation import create_kimi_research_issue
        label_resp = MagicMock()
        label_resp.status_code = 200
        label_resp.json.return_value = [{"name": "kimi-ready", "id": 7}]
        count_resp = MagicMock()
        count_resp.status_code = 200
        count_resp.json.return_value = [{"number": 1}, {"number": 2}]  # 2 active < cap of 3
        # POST response for the successfully created issue.
        issue_resp = MagicMock()
        issue_resp.status_code = 201
        issue_resp.json.return_value = {
            "number": 99,
            "html_url": "http://gitea.local/issues/99",
        }
        mock_client = AsyncMock()
        mock_client.get.side_effect = [label_resp, count_resp]
        mock_client.post.return_value = issue_resp
        async_ctx = AsyncMock()
        async_ctx.__aenter__.return_value = mock_client
        async_ctx.__aexit__.return_value = False
        with (
            patch("config.settings", self._make_settings()),
            patch("httpx.AsyncClient", return_value=async_ctx),
        ):
            result = await create_kimi_research_issue("Task", "ctx", "Q?")
        assert result["success"] is True
        assert result["issue_number"] == 99

    @pytest.mark.asyncio
    async def test_zero_active_issues_proceeds(self):
        """With no active kimi-ready issues, delegation proceeds normally."""
        from timmy.kimi_delegation import create_kimi_research_issue
        label_resp = MagicMock()
        label_resp.status_code = 200
        label_resp.json.return_value = [{"name": "kimi-ready", "id": 7}]
        count_resp = MagicMock()
        count_resp.status_code = 200
        count_resp.json.return_value = []
        issue_resp = MagicMock()
        issue_resp.status_code = 201
        issue_resp.json.return_value = {"number": 50, "html_url": "http://gitea.local/issues/50"}
        mock_client = AsyncMock()
        mock_client.get.side_effect = [label_resp, count_resp]
        mock_client.post.return_value = issue_resp
        async_ctx = AsyncMock()
        async_ctx.__aenter__.return_value = mock_client
        async_ctx.__aexit__.return_value = False
        with (
            patch("config.settings", self._make_settings()),
            patch("httpx.AsyncClient", return_value=async_ctx),
        ):
            result = await create_kimi_research_issue("Task", "ctx", "Q?")
        assert result["success"] is True

View File

@@ -1,9 +1,5 @@
"""Unit tests for infrastructure.self_correction."""
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest

View File

@@ -13,10 +13,9 @@ Usage:
import argparse
import dataclasses
import json
import os
import sys
import time
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path
try:
@@ -28,12 +27,14 @@ except ImportError:
# Add parent dir to path so levels can be imported
sys.path.insert(0, str(Path(__file__).parent))
from levels import level_0_coin_flip
from levels import level_1_tic_tac_toe
from levels import level_2_resource_mgmt
from levels import level_3_battle_tactics
from levels import level_4_trade_route
from levels import level_5_mini_campaign
from levels import (
level_0_coin_flip,
level_1_tic_tac_toe,
level_2_resource_mgmt,
level_3_battle_tactics,
level_4_trade_route,
level_5_mini_campaign,
)
ALL_LEVELS = [
level_0_coin_flip,
@@ -86,7 +87,7 @@ def run_benchmark(
levels_to_run = list(range(len(ALL_LEVELS)))
print(f"\n{'=' * 60}")
print(f" Timmy Cognitive Benchmark — Project Bannerlord M0")
print(" Timmy Cognitive Benchmark — Project Bannerlord M0")
print(f"{'=' * 60}")
print(f" Model: {model}")
print(f" Levels: {levels_to_run}")
@@ -100,7 +101,7 @@ def run_benchmark(
"model": model,
"skipped": True,
"reason": f"Model '{model}' not available",
"timestamp": datetime.now(timezone.utc).isoformat(),
"timestamp": datetime.now(UTC).isoformat(),
}
else:
print(f" ERROR: Model '{model}' not found in Ollama.", file=sys.stderr)
@@ -110,7 +111,7 @@ def run_benchmark(
results = {
"model": model,
"timestamp": datetime.now(timezone.utc).isoformat(),
"timestamp": datetime.now(UTC).isoformat(),
"skipped": False,
"levels": {},
"summary": {},

View File

@@ -21,11 +21,10 @@ import json
import os
import sys
from dataclasses import dataclass, field
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path
from typing import Any
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
# ── Configuration ─────────────────────────────────────────────────────────
@@ -260,7 +259,7 @@ def score_issue_for_path(issue: dict) -> int:
if updated_at:
try:
updated = datetime.fromisoformat(updated_at.replace("Z", "+00:00"))
days_old = (datetime.now(timezone.utc) - updated).days
days_old = (datetime.now(UTC) - updated).days
if days_old < 7:
score += 2
elif days_old < 30:
@@ -388,7 +387,7 @@ def build_golden_path(
4. One more micro-fix or docs (closure)
"""
path = GoldenPath(
generated_at=datetime.now(timezone.utc).isoformat(),
generated_at=datetime.now(UTC).isoformat(),
target_minutes=target_minutes,
)
@@ -478,7 +477,7 @@ def generate_golden_path(
if not client.is_available():
# Return empty path with error indication
return GoldenPath(
generated_at=datetime.now(timezone.utc).isoformat(),
generated_at=datetime.now(UTC).isoformat(),
target_minutes=target_minutes,
items=[],
)

View File

@@ -17,11 +17,11 @@ import json
import os
import sys
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from datetime import UTC, datetime, timedelta
from pathlib import Path
from typing import Any
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
# ── Configuration ─────────────────────────────────────────────────────────
@@ -327,7 +327,7 @@ def check_critical_issues(client: GiteaClient, config: dict) -> IssueSignal:
issues=all_critical[:10], # Limit stored issues
)
except (HTTPError, URLError) as exc:
except (HTTPError, URLError):
return IssueSignal(
count=0,
p0_count=0,
@@ -419,7 +419,7 @@ def check_token_economy(config: dict) -> TokenEconomySignal:
try:
# Read last 24 hours of transactions
since = datetime.now(timezone.utc) - timedelta(hours=24)
since = datetime.now(UTC) - timedelta(hours=24)
recent_mint = 0
recent_burn = 0
@@ -511,7 +511,7 @@ def generate_snapshot(config: dict, token: str | None) -> HealthSnapshot:
overall = calculate_overall_status(ci, issues, flakiness)
return HealthSnapshot(
timestamp=datetime.now(timezone.utc).isoformat(),
timestamp=datetime.now(UTC).isoformat(),
overall_status=overall,
ci=ci,
issues=issues,

View File

@@ -19,11 +19,11 @@ import argparse
import json
import os
import sys
from datetime import datetime, timedelta, timezone
from datetime import UTC, datetime, timedelta
from pathlib import Path
from typing import Any
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
# ── Token Economy Integration ──────────────────────────────────────────────
# Import token rules helpers for tracking Daily Run rewards
@@ -31,12 +31,11 @@ from urllib.error import HTTPError, URLError
sys.path.insert(
0, str(Path(__file__).resolve().parent.parent)
)
from utils.token_rules import TokenRules, compute_token_reward
# Health snapshot lives in the same package
from health_snapshot import generate_snapshot as _generate_health_snapshot
from health_snapshot import get_token as _hs_get_token
from health_snapshot import load_config as _hs_load_config
from utils.token_rules import TokenRules, compute_token_reward
# ── Configuration ─────────────────────────────────────────────────────────
@@ -284,7 +283,7 @@ def generate_agenda(issues: list[dict], config: dict) -> dict:
items.append(item)
return {
"generated_at": datetime.now(timezone.utc).isoformat(),
"generated_at": datetime.now(UTC).isoformat(),
"time_budget_minutes": agenda_time,
"item_count": len(items),
"items": items,
@@ -322,7 +321,7 @@ def print_agenda(agenda: dict) -> None:
def fetch_recent_activity(client: GiteaClient, config: dict) -> dict:
"""Fetch recent issues and PRs from the lookback window."""
lookback_hours = config.get("lookback_hours", 24)
since = datetime.now(timezone.utc) - timedelta(hours=lookback_hours)
since = datetime.now(UTC) - timedelta(hours=lookback_hours)
since_str = since.isoformat()
activity = {
@@ -399,7 +398,7 @@ def load_cycle_data() -> dict:
continue
# Get entries from last 24 hours
since = datetime.now(timezone.utc) - timedelta(hours=24)
since = datetime.now(UTC) - timedelta(hours=24)
recent = [
e for e in entries
if e.get("timestamp") and datetime.fromisoformat(e["timestamp"].replace("Z", "+00:00")) >= since
@@ -426,7 +425,7 @@ def load_cycle_data() -> dict:
def generate_day_summary(activity: dict, cycles: dict) -> dict:
"""Generate a day summary from activity data."""
return {
"generated_at": datetime.now(timezone.utc).isoformat(),
"generated_at": datetime.now(UTC).isoformat(),
"lookback_hours": 24,
"issues_touched": len(activity.get("issues_touched", [])),
"issues_closed": len(activity.get("issues_closed", [])),

View File

@@ -25,7 +25,6 @@ import sys
from collections import Counter
from datetime import UTC, datetime, timedelta
from pathlib import Path
from typing import Any
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen

View File

@@ -12,7 +12,6 @@ Refs: #1105
from __future__ import annotations
import json
import logging
import os
import shutil