feat: distributed brain architecture with rqlite and local embeddings (#118)
- Add new brain module with rqlite-based distributed memory and task queue - Implement BrainClient for memory operations (store, recall, search) - Implement DistributedWorker for continuous task processing - Add local embeddings via sentence-transformers (all-MiniLM-L6-v2) - No OpenAI dependency, runs 100% local on CPU - 384-dim embeddings, 80MB model download - Deprecate persona system (swarm/personas.py, persona_node.py) - Deprecate hands system (hands/__init__.py, routes) - Update marketplace, tools, hands routes for brain integration - Add sentence-transformers and numpy to dependencies - All changes backward compatible with deprecation warnings Co-authored-by: Alexander Payne <apayne@MM.local>
This commit is contained in:
committed by
GitHub
parent
d9bb26b9c5
commit
f7c574e0b2
225
poetry.lock
generated
225
poetry.lock
generated
@@ -1023,10 +1023,9 @@ standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[stand
|
||||
name = "filelock"
|
||||
version = "3.24.3"
|
||||
description = "A platform independent file lock."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "filelock-3.24.3-py3-none-any.whl", hash = "sha256:426e9a4660391f7f8a810d71b0555bce9008b0a1cc342ab1f6947d37639e002d"},
|
||||
{file = "filelock-3.24.3.tar.gz", hash = "sha256:011a5644dc937c22699943ebbfc46e969cdde3e171470a6e40b9533e5a72affa"},
|
||||
@@ -1177,10 +1176,9 @@ files = [
|
||||
name = "fsspec"
|
||||
version = "2026.2.0"
|
||||
description = "File-system specification"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "fsspec-2026.2.0-py3-none-any.whl", hash = "sha256:98de475b5cb3bd66bedd5c4679e87b4fdfe1a3bf4d707b151b3c07e58c9a2437"},
|
||||
{file = "fsspec-2026.2.0.tar.gz", hash = "sha256:6544e34b16869f5aacd5b90bdf1a71acb37792ea3ddf6125ee69a22a53fb8bff"},
|
||||
@@ -1348,10 +1346,10 @@ hyperframe = ">=6.1,<7"
|
||||
name = "hf-xet"
|
||||
version = "1.3.2"
|
||||
description = "Fast transfer of large files with the Hugging Face Hub."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\" and (platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\")"
|
||||
markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""
|
||||
files = [
|
||||
{file = "hf_xet-1.3.2-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:335a8f36c55fd35a92d0062f4e9201b4015057e62747b7e7001ffb203c0ee1d2"},
|
||||
{file = "hf_xet-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c1ae4d3a716afc774e66922f3cac8206bfa707db13f6a7e62dfff74bfc95c9a8"},
|
||||
@@ -1500,10 +1498,9 @@ zstd = ["zstandard (>=0.18.0)"]
|
||||
name = "huggingface-hub"
|
||||
version = "1.5.0"
|
||||
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.9.0"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "huggingface_hub-1.5.0-py3-none-any.whl", hash = "sha256:c9c0b3ab95a777fc91666111f3b3ede71c0cdced3614c553a64e98920585c4ee"},
|
||||
{file = "huggingface_hub-1.5.0.tar.gz", hash = "sha256:f281838db29265880fb543de7a23b0f81d3504675de82044307ea3c6c62f799d"},
|
||||
@@ -1753,6 +1750,18 @@ files = [
|
||||
{file = "jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "joblib"
|
||||
version = "1.5.3"
|
||||
description = "Lightweight pipelining with Python functions"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "joblib-1.5.3-py3-none-any.whl", hash = "sha256:5fc3c5039fc5ca8c0276333a188bbd59d6b7ab37fe6632daa76bc7f9ec18e713"},
|
||||
{file = "joblib-1.5.3.tar.gz", hash = "sha256:8561a3269e6801106863fd0d6d84bb737be9e7631e33aaed3fb9ce5953688da3"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdown-it-py"
|
||||
version = "4.0.0"
|
||||
@@ -1918,10 +1927,9 @@ test = ["coveralls (>=3.0,<4.0)", "pytest (>=3.0.0,<7.0.0)", "pytest-cov (>=2.5.
|
||||
name = "mpmath"
|
||||
version = "1.3.0"
|
||||
description = "Python library for arbitrary-precision floating-point arithmetic"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
|
||||
{file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
|
||||
@@ -2094,10 +2102,9 @@ files = [
|
||||
name = "networkx"
|
||||
version = "3.6"
|
||||
description = "Python package for creating and manipulating graphs and networks"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.11"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "networkx-3.6-py3-none-any.whl", hash = "sha256:cdb395b105806062473d3be36458d8f1459a4e4b98e236a66c3a48996e07684f"},
|
||||
{file = "networkx-3.6.tar.gz", hash = "sha256:285276002ad1f7f7da0f7b42f004bcba70d381e936559166363707fdad3d72ad"},
|
||||
@@ -2200,10 +2207,10 @@ files = [
|
||||
name = "nvidia-cublas-cu12"
|
||||
version = "12.6.4.1"
|
||||
description = "CUBLAS native runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08ed2686e9875d01b58e3cb379c6896df8e76c75e0d4a7f7dace3d7b6d9ef8eb"},
|
||||
{file = "nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:235f728d6e2a409eddf1df58d5b0921cf80cfa9e72b9f2775ccb7b4a87984668"},
|
||||
@@ -2214,10 +2221,10 @@ files = [
|
||||
name = "nvidia-cuda-cupti-cu12"
|
||||
version = "12.6.80"
|
||||
description = "CUDA profiling tools runtime libs."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:166ee35a3ff1587f2490364f90eeeb8da06cd867bd5b701bf7f9a02b78bc63fc"},
|
||||
{file = "nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.whl", hash = "sha256:358b4a1d35370353d52e12f0a7d1769fc01ff74a191689d3870b2123156184c4"},
|
||||
@@ -2230,10 +2237,10 @@ files = [
|
||||
name = "nvidia-cuda-nvrtc-cu12"
|
||||
version = "12.6.77"
|
||||
description = "NVRTC native runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5847f1d6e5b757f1d2b3991a01082a44aad6f10ab3c5c0213fa3e25bddc25a13"},
|
||||
{file = "nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:35b0cc6ee3a9636d5409133e79273ce1f3fd087abb0532d2d2e8fff1fe9efc53"},
|
||||
@@ -2244,10 +2251,10 @@ files = [
|
||||
name = "nvidia-cuda-runtime-cu12"
|
||||
version = "12.6.77"
|
||||
description = "CUDA Runtime native Libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6116fad3e049e04791c0256a9778c16237837c08b27ed8c8401e2e45de8d60cd"},
|
||||
{file = "nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d461264ecb429c84c8879a7153499ddc7b19b5f8d84c204307491989a365588e"},
|
||||
@@ -2260,10 +2267,10 @@ files = [
|
||||
name = "nvidia-cudnn-cu12"
|
||||
version = "9.5.1.17"
|
||||
description = "cuDNN runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9fd4584468533c61873e5fda8ca41bac3a38bcb2d12350830c69b0a96a7e4def"},
|
||||
{file = "nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:30ac3869f6db17d170e0e556dd6cc5eee02647abc31ca856634d5a40f82c15b2"},
|
||||
@@ -2277,10 +2284,10 @@ nvidia-cublas-cu12 = "*"
|
||||
name = "nvidia-cufft-cu12"
|
||||
version = "11.3.0.4"
|
||||
description = "CUFFT native runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d16079550df460376455cba121db6564089176d9bac9e4f360493ca4741b22a6"},
|
||||
{file = "nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8510990de9f96c803a051822618d42bf6cb8f069ff3f48d93a8486efdacb48fb"},
|
||||
@@ -2296,10 +2303,10 @@ nvidia-nvjitlink-cu12 = "*"
|
||||
name = "nvidia-cufile-cu12"
|
||||
version = "1.11.1.6"
|
||||
description = "cuFile GPUDirect libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc23469d1c7e52ce6c1d55253273d32c565dd22068647f3aa59b3c6b005bf159"},
|
||||
{file = "nvidia_cufile_cu12-1.11.1.6-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:8f57a0051dcf2543f6dc2b98a98cb2719c37d3cee1baba8965d57f3bbc90d4db"},
|
||||
@@ -2309,10 +2316,10 @@ files = [
|
||||
name = "nvidia-curand-cu12"
|
||||
version = "10.3.7.77"
|
||||
description = "CURAND native runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:6e82df077060ea28e37f48a3ec442a8f47690c7499bff392a5938614b56c98d8"},
|
||||
{file = "nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a42cd1344297f70b9e39a1e4f467a4e1c10f1da54ff7a85c12197f6c652c8bdf"},
|
||||
@@ -2325,10 +2332,10 @@ files = [
|
||||
name = "nvidia-cusolver-cu12"
|
||||
version = "11.7.1.2"
|
||||
description = "CUDA solver native runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0ce237ef60acde1efc457335a2ddadfd7610b892d94efee7b776c64bb1cac9e0"},
|
||||
{file = "nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e9e49843a7707e42022babb9bcfa33c29857a93b88020c4e4434656a655b698c"},
|
||||
@@ -2346,10 +2353,10 @@ nvidia-nvjitlink-cu12 = "*"
|
||||
name = "nvidia-cusparse-cu12"
|
||||
version = "12.5.4.2"
|
||||
description = "CUSPARSE native runtime libraries"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d25b62fb18751758fe3c93a4a08eff08effedfe4edf1c6bb5afd0890fe88f887"},
|
||||
{file = "nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7aa32fa5470cf754f72d1116c7cbc300b4e638d3ae5304cfa4a638a5b87161b1"},
|
||||
@@ -2365,10 +2372,10 @@ nvidia-nvjitlink-cu12 = "*"
|
||||
name = "nvidia-cusparselt-cu12"
|
||||
version = "0.6.3"
|
||||
description = "NVIDIA cuSPARSELt"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8371549623ba601a06322af2133c4a44350575f5a3108fb75f3ef20b822ad5f1"},
|
||||
{file = "nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e5c8a26c36445dd2e6812f1177978a24e2d37cacce7e090f297a688d1ec44f46"},
|
||||
@@ -2379,10 +2386,10 @@ files = [
|
||||
name = "nvidia-nccl-cu12"
|
||||
version = "2.26.2"
|
||||
description = "NVIDIA Collective Communication Library (NCCL) Runtime"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5c196e95e832ad30fbbb50381eb3cbd1fadd5675e587a548563993609af19522"},
|
||||
{file = "nvidia_nccl_cu12-2.26.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:694cf3879a206553cc9d7dbda76b13efaf610fdb70a50cba303de1b0d1530ac6"},
|
||||
@@ -2392,10 +2399,10 @@ files = [
|
||||
name = "nvidia-nvjitlink-cu12"
|
||||
version = "12.6.85"
|
||||
description = "Nvidia JIT LTO Library"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a"},
|
||||
{file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41"},
|
||||
@@ -2406,10 +2413,10 @@ files = [
|
||||
name = "nvidia-nvtx-cu12"
|
||||
version = "12.6.77"
|
||||
description = "NVIDIA Tools Extension"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f44f8d86bb7d5629988d61c8d3ae61dddb2015dee142740536bc7481b022fe4b"},
|
||||
{file = "nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:adcaabb9d436c9761fca2b13959a2d237c5f9fd406c8e4b723c695409ff88059"},
|
||||
@@ -6980,10 +6987,9 @@ xxhash = ["xxhash (>=3.6.0,<3.7.0)"]
|
||||
name = "regex"
|
||||
version = "2026.2.28"
|
||||
description = "Alternative regular expression module, to replace re."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "regex-2026.2.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fc48c500838be6882b32748f60a15229d2dea96e59ef341eaa96ec83538f498d"},
|
||||
{file = "regex-2026.2.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2afa673660928d0b63d84353c6c08a8a476ddfc4a47e11742949d182e6863ce8"},
|
||||
@@ -7146,10 +7152,9 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
|
||||
name = "safetensors"
|
||||
version = "0.7.0"
|
||||
description = ""
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "safetensors-0.7.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c82f4d474cf725255d9e6acf17252991c3c8aac038d6ef363a4bf8be2f6db517"},
|
||||
{file = "safetensors-0.7.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:94fd4858284736bb67a897a41608b5b0c2496c9bdb3bf2af1fa3409127f20d57"},
|
||||
@@ -7190,14 +7195,75 @@ testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2
|
||||
testingfree = ["huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"]
|
||||
torch = ["packaging", "safetensors[numpy]", "torch (>=1.10)"]
|
||||
|
||||
[[package]]
|
||||
name = "scikit-learn"
|
||||
version = "1.8.0"
|
||||
description = "A set of python modules for machine learning and data mining"
|
||||
optional = false
|
||||
python-versions = ">=3.11"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "scikit_learn-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:146b4d36f800c013d267b29168813f7a03a43ecd2895d04861f1240b564421da"},
|
||||
{file = "scikit_learn-1.8.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f984ca4b14914e6b4094c5d52a32ea16b49832c03bd17a110f004db3c223e8e1"},
|
||||
{file = "scikit_learn-1.8.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e30adb87f0cc81c7690a84f7932dd66be5bac57cfe16b91cb9151683a4a2d3b"},
|
||||
{file = "scikit_learn-1.8.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ada8121bcb4dac28d930febc791a69f7cb1673c8495e5eee274190b73a4559c1"},
|
||||
{file = "scikit_learn-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:c57b1b610bd1f40ba43970e11ce62821c2e6569e4d74023db19c6b26f246cb3b"},
|
||||
{file = "scikit_learn-1.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:2838551e011a64e3053ad7618dda9310175f7515f1742fa2d756f7c874c05961"},
|
||||
{file = "scikit_learn-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5fb63362b5a7ddab88e52b6dbb47dac3fd7dafeee740dc6c8d8a446ddedade8e"},
|
||||
{file = "scikit_learn-1.8.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:5025ce924beccb28298246e589c691fe1b8c1c96507e6d27d12c5fadd85bfd76"},
|
||||
{file = "scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4496bb2cf7a43ce1a2d7524a79e40bc5da45cf598dbf9545b7e8316ccba47bb4"},
|
||||
{file = "scikit_learn-1.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0bcfe4d0d14aec44921545fd2af2338c7471de9cb701f1da4c9d85906ab847a"},
|
||||
{file = "scikit_learn-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:35c007dedb2ffe38fe3ee7d201ebac4a2deccd2408e8621d53067733e3c74809"},
|
||||
{file = "scikit_learn-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:8c497fff237d7b4e07e9ef1a640887fa4fb765647f86fbe00f969ff6280ce2bb"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0d6ae97234d5d7079dc0040990a6f7aeb97cb7fa7e8945f1999a429b23569e0a"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:edec98c5e7c128328124a029bceb09eda2d526997780fef8d65e9a69eead963e"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:74b66d8689d52ed04c271e1329f0c61635bcaf5b926db9b12d58914cdc01fe57"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8fdf95767f989b0cfedb85f7ed8ca215d4be728031f56ff5a519ee1e3276dc2e"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:2de443b9373b3b615aec1bb57f9baa6bb3a9bd093f1269ba95c17d870422b271"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:eddde82a035681427cbedded4e6eff5e57fa59216c2e3e90b10b19ab1d0a65c3"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7cc267b6108f0a1499a734167282c00c4ebf61328566b55ef262d48e9849c735"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:fe1c011a640a9f0791146011dfd3c7d9669785f9fed2b2a5f9e207536cf5c2fd"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72358cce49465d140cc4e7792015bb1f0296a9742d5622c67e31399b75468b9e"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:80832434a6cc114f5219211eec13dcbc16c2bac0e31ef64c6d346cde3cf054cb"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ee787491dbfe082d9c3013f01f5991658b0f38aa8177e4cd4bf434c58f551702"},
|
||||
{file = "scikit_learn-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf97c10a3f5a7543f9b88cbf488d33d175e9146115a451ae34568597ba33dcde"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c22a2da7a198c28dd1a6e1136f19c830beab7fdca5b3e5c8bba8394f8a5c45b3"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:6b595b07a03069a2b1740dc08c2299993850ea81cce4fe19b2421e0c970de6b7"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:29ffc74089f3d5e87dfca4c2c8450f88bdc61b0fc6ed5d267f3988f19a1309f6"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fb65db5d7531bccf3a4f6bec3462223bea71384e2cda41da0f10b7c292b9e7c4"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:56079a99c20d230e873ea40753102102734c5953366972a71d5cb39a32bc40c6"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:3bad7565bc9cf37ce19a7c0d107742b320c1285df7aab1a6e2d28780df167242"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:4511be56637e46c25721e83d1a9cea9614e7badc7040c4d573d75fbe257d6fd7"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:a69525355a641bf8ef136a7fa447672fb54fe8d60cab5538d9eb7c6438543fb9"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c2656924ec73e5939c76ac4c8b026fc203b83d8900362eb2599d8aee80e4880f"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15fc3b5d19cc2be65404786857f2e13c70c83dd4782676dd6814e3b89dc8f5b9"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:00d6f1d66fbcf4eba6e356e1420d33cc06c70a45bb1363cd6f6a8e4ebbbdece2"},
|
||||
{file = "scikit_learn-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f28dd15c6bb0b66ba09728cf09fd8736c304be29409bd8445a080c1280619e8c"},
|
||||
{file = "scikit_learn-1.8.0.tar.gz", hash = "sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
joblib = ">=1.3.0"
|
||||
numpy = ">=1.24.1"
|
||||
scipy = ">=1.10.0"
|
||||
threadpoolctl = ">=3.2.0"
|
||||
|
||||
[package.extras]
|
||||
benchmark = ["matplotlib (>=3.6.1)", "memory_profiler (>=0.57.0)", "pandas (>=1.5.0)"]
|
||||
build = ["cython (>=3.1.2)", "meson-python (>=0.17.1)", "numpy (>=1.24.1)", "scipy (>=1.10.0)"]
|
||||
docs = ["Pillow (>=10.1.0)", "matplotlib (>=3.6.1)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.5.0)", "plotly (>=5.18.0)", "polars (>=0.20.30)", "pooch (>=1.8.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.22.0)", "seaborn (>=0.13.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"]
|
||||
examples = ["matplotlib (>=3.6.1)", "pandas (>=1.5.0)", "plotly (>=5.18.0)", "pooch (>=1.8.0)", "scikit-image (>=0.22.0)", "seaborn (>=0.13.0)"]
|
||||
install = ["joblib (>=1.3.0)", "numpy (>=1.24.1)", "scipy (>=1.10.0)", "threadpoolctl (>=3.2.0)"]
|
||||
maintenance = ["conda-lock (==3.0.1)"]
|
||||
tests = ["matplotlib (>=3.6.1)", "mypy (>=1.15)", "numpydoc (>=1.2.0)", "pandas (>=1.5.0)", "polars (>=0.20.30)", "pooch (>=1.8.0)", "pyamg (>=5.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.11.7)"]
|
||||
|
||||
[[package]]
|
||||
name = "scipy"
|
||||
version = "1.17.1"
|
||||
description = "Fundamental algorithms for scientific computing in Python"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.11"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "scipy-1.17.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:1f95b894f13729334fb990162e911c9e5dc1ab390c58aa6cbecb389c5b5e28ec"},
|
||||
{file = "scipy-1.17.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:e18f12c6b0bc5a592ed23d3f7b891f68fd7f8241d69b7883769eb5d5dfb52696"},
|
||||
@@ -7291,14 +7357,44 @@ typing_extensions = ">=4.15.0,<5.0"
|
||||
urllib3 = {version = ">=2.6.3,<3.0", extras = ["socks"]}
|
||||
websocket-client = ">=1.8.0,<2.0"
|
||||
|
||||
[[package]]
|
||||
name = "sentence-transformers"
|
||||
version = "5.2.3"
|
||||
description = "Embeddings, Retrieval, and Reranking"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sentence_transformers-5.2.3-py3-none-any.whl", hash = "sha256:6437c62d4112b615ddebda362dfc16a4308d604c5b68125ed586e3e95d5b2e30"},
|
||||
{file = "sentence_transformers-5.2.3.tar.gz", hash = "sha256:3cd3044e1f3fe859b6a1b66336aac502eaae5d3dd7d5c8fc237f37fbf58137c7"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
huggingface-hub = ">=0.20.0"
|
||||
numpy = "*"
|
||||
scikit-learn = "*"
|
||||
scipy = "*"
|
||||
torch = ">=1.11.0"
|
||||
tqdm = "*"
|
||||
transformers = ">=4.41.0,<6.0.0"
|
||||
typing_extensions = ">=4.5.0"
|
||||
|
||||
[package.extras]
|
||||
dev = ["Pillow", "accelerate (>=0.20.3)", "datasets", "peft", "pre-commit", "pytest", "pytest-cov"]
|
||||
image = ["Pillow"]
|
||||
onnx = ["optimum-onnx[onnxruntime]"]
|
||||
onnx-gpu = ["optimum-onnx[onnxruntime-gpu]"]
|
||||
openvino = ["optimum-intel[openvino]"]
|
||||
train = ["accelerate (>=0.20.3)", "datasets"]
|
||||
|
||||
[[package]]
|
||||
name = "setuptools"
|
||||
version = "82.0.0"
|
||||
description = "Easily download, build, install, upgrade, and uninstall Python packages"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\" and python_version >= \"3.12\" or extra == \"bigbrain\" and platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" or python_version >= \"3.12\""
|
||||
files = [
|
||||
{file = "setuptools-82.0.0-py3-none-any.whl", hash = "sha256:70b18734b607bd1da571d097d236cfcfacaf01de45717d59e6e04b96877532e0"},
|
||||
{file = "setuptools-82.0.0.tar.gz", hash = "sha256:22e0a2d69474c6ae4feb01951cb69d515ed23728cf96d05513d36e42b62b37cb"},
|
||||
@@ -7487,10 +7583,9 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
|
||||
name = "sympy"
|
||||
version = "1.14.0"
|
||||
description = "Computer algebra system (CAS) in Python"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"},
|
||||
{file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"},
|
||||
@@ -7502,14 +7597,25 @@ mpmath = ">=1.1.0,<1.4"
|
||||
[package.extras]
|
||||
dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "threadpoolctl"
|
||||
version = "3.6.0"
|
||||
description = "threadpoolctl"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"},
|
||||
{file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokenizers"
|
||||
version = "0.22.2"
|
||||
description = ""
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c"},
|
||||
{file = "tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001"},
|
||||
@@ -7549,10 +7655,9 @@ testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff",
|
||||
name = "torch"
|
||||
version = "2.7.1"
|
||||
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.9.0"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "torch-2.7.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a103b5d782af5bd119b81dbcc7ffc6fa09904c423ff8db397a1e6ea8fd71508f"},
|
||||
{file = "torch-2.7.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:fe955951bdf32d182ee8ead6c3186ad54781492bf03d547d31771a01b3d6fb7d"},
|
||||
@@ -7634,10 +7739,9 @@ telegram = ["requests"]
|
||||
name = "transformers"
|
||||
version = "5.2.0"
|
||||
description = "Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.10.0"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "transformers-5.2.0-py3-none-any.whl", hash = "sha256:9ecaf243dc45bee11a7d93f8caf03746accc0cb069181bbf4ad8566c53e854b4"},
|
||||
{file = "transformers-5.2.0.tar.gz", hash = "sha256:0088b8b46ccc9eff1a1dca72b5d618a5ee3b1befc3e418c9512b35dea9f9a650"},
|
||||
@@ -7728,10 +7832,10 @@ wsproto = ">=0.14"
|
||||
name = "triton"
|
||||
version = "3.3.1"
|
||||
description = "A language and compiler for custom Deep Learning operations"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and extra == \"bigbrain\""
|
||||
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
|
||||
files = [
|
||||
{file = "triton-3.3.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b74db445b1c562844d3cfad6e9679c72e93fdfb1a90a24052b03bb5c49d1242e"},
|
||||
{file = "triton-3.3.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b31e3aa26f8cb3cc5bf4e187bf737cbacf17311e1112b781d4a059353dfd731b"},
|
||||
@@ -7771,10 +7875,9 @@ shellingham = ">=1.3.0"
|
||||
name = "typer-slim"
|
||||
version = "0.24.0"
|
||||
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"bigbrain\""
|
||||
files = [
|
||||
{file = "typer_slim-0.24.0-py3-none-any.whl", hash = "sha256:d5d7ee1ee2834d5020c7c616ed5e0d0f29b9a4b1dd283bdebae198ec09778d0e"},
|
||||
{file = "typer_slim-0.24.0.tar.gz", hash = "sha256:f0ed36127183f52ae6ced2ecb2521789995992c521a46083bfcdbb652d22ad34"},
|
||||
@@ -8306,4 +8409,4 @@ voice = ["pyttsx3"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.11,<4"
|
||||
content-hash = "320a1a658bc130fe0164940e45971180ca28a7c073930c40c40fbf41b90bc3f2"
|
||||
content-hash = "4238d5c83f024fbc0a67b6f2bc7c4adcbcbae21ad37a24630031edc6a0e67406"
|
||||
|
||||
@@ -10,6 +10,7 @@ readme = "README.md"
|
||||
license = "MIT"
|
||||
packages = [
|
||||
{ include = "config.py", from = "src" },
|
||||
{ include = "brain", from = "src" },
|
||||
{ include = "creative", from = "src" },
|
||||
{ include = "dashboard", from = "src" },
|
||||
{ include = "hands", from = "src" },
|
||||
@@ -43,6 +44,8 @@ websockets = ">=12.0"
|
||||
GitPython = ">=3.1.40"
|
||||
moviepy = ">=2.0.0"
|
||||
requests = ">=2.31.0"
|
||||
sentence-transformers = ">=2.0.0" # Local embeddings for brain
|
||||
numpy = ">=1.24.0"
|
||||
# Optional extras
|
||||
redis = { version = ">=5.0.0", optional = true }
|
||||
python-telegram-bot = { version = ">=21.0", optional = true }
|
||||
|
||||
14
src/brain/__init__.py
Normal file
14
src/brain/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""Distributed Brain — Rqlite-based memory and task queue.
|
||||
|
||||
A distributed SQLite (rqlite) cluster that runs across all Tailscale devices.
|
||||
Provides:
|
||||
- Semantic memory with local embeddings
|
||||
- Distributed task queue with work stealing
|
||||
- Automatic replication and failover
|
||||
"""
|
||||
|
||||
from brain.client import BrainClient
|
||||
from brain.worker import DistributedWorker
|
||||
from brain.embeddings import LocalEmbedder
|
||||
|
||||
__all__ = ["BrainClient", "DistributedWorker", "LocalEmbedder"]
|
||||
440
src/brain/client.py
Normal file
440
src/brain/client.py
Normal file
@@ -0,0 +1,440 @@
|
||||
"""Brain client — interface to distributed rqlite memory.
|
||||
|
||||
All devices connect to the local rqlite node, which replicates to peers.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_RQLITE_URL = "http://localhost:4001"
|
||||
|
||||
|
||||
class BrainClient:
    """Client for the distributed brain (rqlite).

    Connects to the local rqlite instance, which handles replication via
    Raft consensus. All writes go to the leader; reads can be served from
    the local node.
    """

    def __init__(self, rqlite_url: Optional[str] = None, node_id: Optional[str] = None):
        """Args:
            rqlite_url: Base URL of the local rqlite node. Defaults to the
                RQLITE_URL env var, then http://localhost:4001.
            node_id: Identifier for this client. Defaults to hostname-pid.
        """
        self.rqlite_url = rqlite_url or os.environ.get("RQLITE_URL", DEFAULT_RQLITE_URL)
        self.node_id = node_id or f"{socket.gethostname()}-{os.getpid()}"
        self.source = self._detect_source()
        self._client = httpx.AsyncClient(timeout=30)

    def _detect_source(self) -> str:
        """Detect what component is using the brain.

        Could be 'timmy', 'zeroclaw', 'worker', etc. Read from the
        BRAIN_SOURCE environment variable; defaults to 'timmy'.
        """
        return os.environ.get("BRAIN_SOURCE", "timmy")

    @staticmethod
    def _utcnow() -> str:
        """UTC timestamp using a space separator.

        SQLite's datetime() emits 'YYYY-MM-DD HH:MM:SS'; using the ISO 'T'
        separator here would make the lexicographic created_at comparison in
        get_recent() wrong for same-day rows ('T' sorts after every digit).
        """
        return datetime.utcnow().isoformat(sep=" ")

    async def _execute(self, sql: str, params: List[Any]) -> Dict[str, Any]:
        """POST one parameterized statement to rqlite's /db/execute.

        rqlite expects an array of statements, each itself an array of
        [sql, param, param, ...] — NOT a flat [sql, [params]] pair, which
        rqlite would treat as two separate (and invalid) statements.
        """
        resp = await self._client.post(
            f"{self.rqlite_url}/db/execute",
            json=[[sql, *params]],
        )
        resp.raise_for_status()
        return resp.json()

    async def _query(self, sql: str, params: List[Any]) -> Dict[str, Any]:
        """POST one parameterized query to rqlite's /db/query (same statement
        encoding as _execute)."""
        resp = await self._client.post(
            f"{self.rqlite_url}/db/query",
            json=[[sql, *params]],
        )
        resp.raise_for_status()
        return resp.json()

    @staticmethod
    def _first_rows(result: Dict[str, Any]) -> List[List[Any]]:
        """Rows of the first statement's result set, or [] when absent."""
        if result.get("results"):
            return result["results"][0].get("rows", []) or []
        return []

    # ──────────────────────────────────────────────────────────────────────────
    # Memory Operations
    # ──────────────────────────────────────────────────────────────────────────

    async def remember(
        self,
        content: str,
        tags: Optional[List[str]] = None,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Store a memory with its embedding.

        Args:
            content: Text content to remember
            tags: Optional list of tags (e.g., ['shell', 'result'])
            source: Source identifier (defaults to self.source)
            metadata: Additional JSON-serializable metadata

        Returns:
            Dict with 'id' and 'status'

        Raises:
            httpx.HTTPError: if the rqlite request fails.
        """
        from brain.embeddings import get_embedder

        # Serialize the embedding as a JSON float array: raw float32 bytes
        # are not JSON-serializable, so passing them as an rqlite parameter
        # made httpx raise TypeError before the request was ever sent.
        # sqlite-vec also accepts JSON-text vectors, so this stays usable.
        embedding_json = json.dumps(get_embedder().encode(content).tolist())

        query = """
            INSERT INTO memories (content, embedding, source, tags, metadata, created_at)
            VALUES (?, ?, ?, ?, ?, ?)
        """
        params = [
            content,
            embedding_json,
            source or self.source,
            json.dumps(tags or []),
            json.dumps(metadata or {}),
            self._utcnow(),
        ]

        try:
            result = await self._execute(query, params)
            results = result.get("results") or []
            last_id = results[0].get("last_insert_id") if results else None
            logger.debug(f"Stored memory {last_id}: {content[:50]}...")
            return {"id": last_id, "status": "stored"}
        except Exception as e:
            logger.error(f"Failed to store memory: {e}")
            raise

    async def recall(
        self,
        query: str,
        limit: int = 5,
        sources: Optional[List[str]] = None
    ) -> List[Dict[str, Any]]:
        """Semantic search for memories.

        Args:
            query: Search query text
            limit: Max results to return
            sources: Filter by source(s) (e.g., ['timmy', 'user'])

        Returns:
            List of dicts with 'content', 'source', 'metadata', 'distance'.
            (The old docstring/annotation claimed plain strings; the method
            has always returned dicts.)
        """
        from brain.embeddings import get_embedder

        query_vec = json.dumps(get_embedder().encode(query).tolist())

        # NOTE(review): this MATCH targets memories.embedding, but the schema
        # creates the vec0 virtual table as vec_memories — confirm which table
        # actually carries the sqlite-vec index before relying on this path.
        sql = "SELECT content, source, metadata, distance FROM memories WHERE embedding MATCH ?"
        params: List[Any] = [query_vec]

        if sources:
            placeholders = ",".join(["?"] * len(sources))
            sql += f" AND source IN ({placeholders})"
            params.extend(sources)

        sql += " ORDER BY distance LIMIT ?"
        params.append(limit)

        try:
            result = await self._query(sql, params)
            return [
                {
                    "content": row[0],
                    "source": row[1],
                    "metadata": json.loads(row[2]) if row[2] else {},
                    "distance": row[3],
                }
                for row in self._first_rows(result)
            ]
        except Exception as e:
            logger.error(f"Failed to search memories: {e}")
            # Graceful fallback - recall is best-effort context, not critical
            return []

    async def get_recent(
        self,
        hours: int = 24,
        limit: int = 20,
        sources: Optional[List[str]] = None
    ) -> List[Dict[str, Any]]:
        """Get recent memories by time.

        Args:
            hours: Look back this many hours
            limit: Max results
            sources: Optional source filter

        Returns:
            List of memory dicts (empty list on error).
        """
        sql = """
            SELECT id, content, source, tags, metadata, created_at
            FROM memories
            WHERE created_at > datetime('now', ?)
        """
        params: List[Any] = [f"-{hours} hours"]

        if sources:
            placeholders = ",".join(["?"] * len(sources))
            sql += f" AND source IN ({placeholders})"
            params.extend(sources)

        sql += " ORDER BY created_at DESC LIMIT ?"
        params.append(limit)

        try:
            result = await self._query(sql, params)
            return [
                {
                    "id": row[0],
                    "content": row[1],
                    "source": row[2],
                    "tags": json.loads(row[3]) if row[3] else [],
                    "metadata": json.loads(row[4]) if row[4] else {},
                    "created_at": row[5],
                }
                for row in self._first_rows(result)
            ]
        except Exception as e:
            logger.error(f"Failed to get recent memories: {e}")
            return []

    async def get_context(self, query: str) -> str:
        """Get formatted context for system prompt.

        Combines recent memories + semantically relevant memories.

        Args:
            query: Current user query to find relevant context

        Returns:
            Formatted context string for prompt injection
        """
        recent = await self.get_recent(hours=24, limit=10)
        relevant = await self.recall(query, limit=5)

        lines = ["Recent activity:"]
        lines.extend(f"- {m['content'][:100]}" for m in recent[:5])

        lines.append("\nRelevant memories:")
        lines.extend(f"- {r['content'][:100]}" for r in relevant[:5])

        return "\n".join(lines)

    # ──────────────────────────────────────────────────────────────────────────
    # Task Queue Operations
    # ──────────────────────────────────────────────────────────────────────────

    async def submit_task(
        self,
        content: str,
        task_type: str = "general",
        priority: int = 0,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Submit a task to the distributed queue.

        Args:
            content: Task description/prompt
            task_type: Type of task (shell, creative, code, research, general)
            priority: Higher = processed first
            metadata: Additional task data

        Returns:
            Dict with task 'id'

        Raises:
            httpx.HTTPError: if the rqlite request fails.
        """
        query = """
            INSERT INTO tasks (content, task_type, priority, status, metadata, created_at)
            VALUES (?, ?, ?, 'pending', ?, ?)
        """
        params = [
            content,
            task_type,
            priority,
            json.dumps(metadata or {}),
            self._utcnow(),
        ]

        try:
            result = await self._execute(query, params)
            results = result.get("results") or []
            last_id = results[0].get("last_insert_id") if results else None
            logger.info(f"Submitted task {last_id}: {content[:50]}...")
            return {"id": last_id, "status": "queued"}
        except Exception as e:
            logger.error(f"Failed to submit task: {e}")
            raise

    async def claim_task(
        self,
        capabilities: List[str],
        node_id: Optional[str] = None
    ) -> Optional[Dict[str, Any]]:
        """Atomically claim next available task.

        Uses a single UPDATE ... RETURNING: rqlite serializes writes through
        Raft consensus, so only one node's UPDATE can match the row.

        Args:
            capabilities: List of capabilities this node has
            node_id: Identifier for claiming node

        Returns:
            Task dict, or None when no task is available (or on error —
            claiming is best-effort).
        """
        claimer = node_id or self.node_id
        placeholders = ",".join(["?"] * len(capabilities))

        query = f"""
            UPDATE tasks
            SET status = 'claimed',
                claimed_by = ?,
                claimed_at = ?
            WHERE id = (
                SELECT id FROM tasks
                WHERE status = 'pending'
                AND (task_type IN ({placeholders}) OR task_type = 'general')
                ORDER BY priority DESC, created_at ASC
                LIMIT 1
            )
            AND status = 'pending'
            RETURNING id, content, task_type, priority, metadata
        """
        params = [claimer, self._utcnow()] + capabilities

        try:
            result = await self._execute(query, params)
            rows = self._first_rows(result)
            if not rows:
                return None
            row = rows[0]
            return {
                "id": row[0],
                "content": row[1],
                "type": row[2],
                "priority": row[3],
                "metadata": json.loads(row[4]) if row[4] else {},
            }
        except Exception as e:
            logger.error(f"Failed to claim task: {e}")
            return None

    async def complete_task(
        self,
        task_id: int,
        success: bool,
        result: Optional[str] = None,
        error: Optional[str] = None
    ) -> None:
        """Mark task as completed or failed.

        Best-effort: failures to record completion are logged, not raised.

        Args:
            task_id: Task ID
            success: True if task succeeded
            result: Task result/output
            error: Error message if failed
        """
        status = "done" if success else "failed"

        query = """
            UPDATE tasks
            SET status = ?,
                result = ?,
                error = ?,
                completed_at = ?
            WHERE id = ?
        """
        params = [status, result, error, self._utcnow(), task_id]

        try:
            await self._execute(query, params)
            logger.debug(f"Task {task_id} marked {status}")
        except Exception as e:
            logger.error(f"Failed to complete task {task_id}: {e}")

    async def get_pending_tasks(self, limit: int = 100) -> List[Dict[str, Any]]:
        """Get list of pending tasks (for dashboard/monitoring).

        Args:
            limit: Max tasks to return

        Returns:
            List of pending task dicts (empty list on error).
        """
        sql = """
            SELECT id, content, task_type, priority, metadata, created_at
            FROM tasks
            WHERE status = 'pending'
            ORDER BY priority DESC, created_at ASC
            LIMIT ?
        """

        try:
            result = await self._query(sql, [limit])
            return [
                {
                    "id": row[0],
                    "content": row[1],
                    "type": row[2],
                    "priority": row[3],
                    "metadata": json.loads(row[4]) if row[4] else {},
                    "created_at": row[5],
                }
                for row in self._first_rows(result)
            ]
        except Exception as e:
            logger.error(f"Failed to get pending tasks: {e}")
            return []

    async def close(self):
        """Close the underlying HTTP client."""
        await self._client.aclose()
|
||||
86
src/brain/embeddings.py
Normal file
86
src/brain/embeddings.py
Normal file
@@ -0,0 +1,86 @@
|
||||
"""Local embeddings using sentence-transformers.
|
||||
|
||||
No OpenAI dependency. Runs 100% locally on CPU.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import numpy as np
|
||||
from typing import List, Union
|
||||
|
||||
logger = logging.getLogger(__name__)

# Process-wide model cache shared by every LocalEmbedder instance.
_model = None
_model_name = "all-MiniLM-L6-v2"
_dimensions = 384


class LocalEmbedder:
    """Local sentence-transformer embedder.

    Wraps all-MiniLM-L6-v2 (~80MB download, CPU-friendly), producing
    384-dimensional normalized vectors suitable for semantic search.
    """

    def __init__(self, model_name: str = _model_name):
        self.model_name = model_name
        self._model = None  # populated lazily on first encode()
        self._dimensions = _dimensions

    def _load_model(self):
        """Load the sentence-transformers model once per process (cached in
        the module-level _model so repeated instances share it)."""
        global _model
        if _model is None:
            try:
                from sentence_transformers import SentenceTransformer
            except ImportError:
                logger.error("sentence-transformers not installed. Run: pip install sentence-transformers")
                raise
            logger.info(f"Loading embedding model: {self.model_name}")
            _model = SentenceTransformer(self.model_name)
            logger.info(f"Embedding model loaded ({self._dimensions} dims)")
        self._model = _model

    def encode(self, text: Union[str, List[str]]) -> np.ndarray:
        """Embed text into normalized vector(s).

        Args:
            text: String or list of strings to encode

        Returns:
            Array of shape (dims,) for a single string, (n, dims) for a list
        """
        if self._model is None:
            self._load_model()
        # normalize_embeddings=True makes a plain dot product equal cosine
        # similarity downstream.
        return self._model.encode(text, normalize_embeddings=True)

    def encode_single(self, text: str) -> bytes:
        """Embed one string and return raw float32 bytes for BLOB storage."""
        vec = self.encode(text)
        if vec.ndim > 1:
            # Defensive: a batched result keeps only the first row.
            vec = vec[0]
        return vec.astype(np.float32).tobytes()

    def similarity(self, a: np.ndarray, b: np.ndarray) -> float:
        """Cosine similarity of two vectors already normalized by encode()."""
        return float(np.dot(a, b))
||||
|
||||
|
||||
def get_embedder() -> LocalEmbedder:
    """Return the process-wide LocalEmbedder singleton.

    The previous implementation created a new LocalEmbedder on every call,
    contradicting its own "singleton" claim (only the underlying model was
    shared). The instance is now cached on the function itself.
    """
    instance = getattr(get_embedder, "_instance", None)
    if instance is None:
        instance = LocalEmbedder()
        get_embedder._instance = instance
    return instance
|
||||
94
src/brain/schema.py
Normal file
94
src/brain/schema.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""Database schema for distributed brain.
|
||||
|
||||
SQL to initialize rqlite with memories and tasks tables.
|
||||
"""
|
||||
|
||||
# Schema version for migrations
|
||||
SCHEMA_VERSION = 1
|
||||
|
||||
INIT_SQL = """
|
||||
-- Enable SQLite extensions
|
||||
.load vector0
|
||||
.load vec0
|
||||
|
||||
-- Memories table with vector search
|
||||
CREATE TABLE IF NOT EXISTS memories (
|
||||
id INTEGER PRIMARY KEY,
|
||||
content TEXT NOT NULL,
|
||||
embedding BLOB, -- 384-dim float32 array (normalized)
|
||||
source TEXT, -- 'timmy', 'zeroclaw', 'worker', 'user'
|
||||
tags TEXT, -- JSON array
|
||||
metadata TEXT, -- JSON object
|
||||
created_at TEXT -- ISO8601
|
||||
);
|
||||
|
||||
-- Tasks table (distributed queue)
|
||||
CREATE TABLE IF NOT EXISTS tasks (
|
||||
id INTEGER PRIMARY KEY,
|
||||
content TEXT NOT NULL,
|
||||
task_type TEXT DEFAULT 'general', -- shell, creative, code, research, general
|
||||
priority INTEGER DEFAULT 0, -- Higher = process first
|
||||
status TEXT DEFAULT 'pending', -- pending, claimed, done, failed
|
||||
claimed_by TEXT, -- Node ID
|
||||
claimed_at TEXT,
|
||||
result TEXT,
|
||||
error TEXT,
|
||||
metadata TEXT, -- JSON
|
||||
created_at TEXT,
|
||||
completed_at TEXT
|
||||
);
|
||||
|
||||
-- Node registry (who's online)
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
node_id TEXT PRIMARY KEY,
|
||||
capabilities TEXT, -- JSON array
|
||||
last_seen TEXT, -- ISO8601
|
||||
load_average REAL
|
||||
);
|
||||
|
||||
-- Indexes for performance
|
||||
CREATE INDEX IF NOT EXISTS idx_memories_source ON memories(source);
|
||||
CREATE INDEX IF NOT EXISTS idx_memories_created ON memories(created_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_status_priority ON tasks(status, priority DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_claimed ON tasks(claimed_by, status);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_type ON tasks(task_type);
|
||||
|
||||
-- Virtual table for vector search (if using sqlite-vec)
|
||||
-- Note: This requires sqlite-vec extension loaded
|
||||
CREATE VIRTUAL TABLE IF NOT EXISTS vec_memories USING vec0(
|
||||
embedding float[384]
|
||||
);
|
||||
|
||||
-- Schema version tracking
|
||||
CREATE TABLE IF NOT EXISTS schema_version (
|
||||
version INTEGER PRIMARY KEY,
|
||||
applied_at TEXT
|
||||
);
|
||||
|
||||
INSERT OR REPLACE INTO schema_version (version, applied_at)
|
||||
VALUES (1, datetime('now'));
|
||||
"""
|
||||
|
||||
MIGRATIONS = {
|
||||
# Future migrations go here
|
||||
# 2: "ALTER TABLE ...",
|
||||
}
|
||||
|
||||
|
||||
def get_init_sql() -> str:
    """Return the SQL script (INIT_SQL) used to initialize a fresh brain database.

    The table/index statements use IF NOT EXISTS, so applying the script to
    an already-initialized database is expected to be harmless.
    """
    return INIT_SQL
|
||||
|
||||
|
||||
def get_migration_sql(from_version: int, to_version: int) -> str:
    """Build the SQL that migrates the schema between two versions.

    Walks every version step after *from_version* up to and including
    *to_version*, emitting that step's migration (when one is registered in
    MIGRATIONS) followed by a schema_version bump. Returns an empty string
    when no upgrade is needed.
    """
    if to_version <= from_version:
        return ""

    statements = []
    for version in range(from_version + 1, to_version + 1):
        if version in MIGRATIONS:
            statements.append(MIGRATIONS[version])
        # Record the bump even for steps with no DDL, so the version row
        # always reflects the latest applied step.
        statements.append(
            f"UPDATE schema_version SET version = {version}, applied_at = datetime('now');"
        )

    return "\n".join(statements)
|
||||
366
src/brain/worker.py
Normal file
366
src/brain/worker.py
Normal file
@@ -0,0 +1,366 @@
|
||||
"""Distributed Worker — continuously processes tasks from the brain queue.
|
||||
|
||||
Each device runs a worker that claims and executes tasks based on capabilities.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
from brain.client import BrainClient
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DistributedWorker:
|
||||
"""Continuous task processor for the distributed brain.
|
||||
|
||||
Runs on every device, claims tasks matching its capabilities,
|
||||
executes them immediately, stores results.
|
||||
"""
|
||||
|
||||
def __init__(self, brain_client: Optional[BrainClient] = None):
|
||||
self.brain = brain_client or BrainClient()
|
||||
self.node_id = f"{socket.gethostname()}-{os.getpid()}"
|
||||
self.capabilities = self._detect_capabilities()
|
||||
self.running = False
|
||||
self._handlers: Dict[str, Callable] = {}
|
||||
self._register_default_handlers()
|
||||
|
||||
def _detect_capabilities(self) -> List[str]:
|
||||
"""Detect what this node can do."""
|
||||
caps = ["general", "shell", "file_ops", "git"]
|
||||
|
||||
# Check for GPU
|
||||
if self._has_gpu():
|
||||
caps.append("gpu")
|
||||
caps.append("creative")
|
||||
caps.append("image_gen")
|
||||
caps.append("video_gen")
|
||||
|
||||
# Check for internet
|
||||
if self._has_internet():
|
||||
caps.append("web")
|
||||
caps.append("research")
|
||||
|
||||
# Check memory
|
||||
mem_gb = self._get_memory_gb()
|
||||
if mem_gb > 16:
|
||||
caps.append("large_model")
|
||||
if mem_gb > 32:
|
||||
caps.append("huge_model")
|
||||
|
||||
# Check for specific tools
|
||||
if self._has_command("ollama"):
|
||||
caps.append("ollama")
|
||||
if self._has_command("docker"):
|
||||
caps.append("docker")
|
||||
if self._has_command("cargo"):
|
||||
caps.append("rust")
|
||||
|
||||
logger.info(f"Worker capabilities: {caps}")
|
||||
return caps
|
||||
|
||||
def _has_gpu(self) -> bool:
|
||||
"""Check for NVIDIA or AMD GPU."""
|
||||
try:
|
||||
# Check for nvidia-smi
|
||||
result = subprocess.run(
|
||||
["nvidia-smi"], capture_output=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return True
|
||||
except:
|
||||
pass
|
||||
|
||||
# Check for ROCm
|
||||
if os.path.exists("/opt/rocm"):
|
||||
return True
|
||||
|
||||
# Check for Apple Silicon Metal
|
||||
if os.uname().sysname == "Darwin":
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["system_profiler", "SPDisplaysDataType"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if "Metal" in result.stdout:
|
||||
return True
|
||||
except:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
def _has_internet(self) -> bool:
|
||||
"""Check if we have internet connectivity."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["curl", "-s", "--max-time", "3", "https://1.1.1.1"],
|
||||
capture_output=True, timeout=5
|
||||
)
|
||||
return result.returncode == 0
|
||||
except:
|
||||
return False
|
||||
|
||||
def _get_memory_gb(self) -> float:
|
||||
"""Get total system memory in GB."""
|
||||
try:
|
||||
if os.uname().sysname == "Darwin":
|
||||
result = subprocess.run(
|
||||
["sysctl", "-n", "hw.memsize"],
|
||||
capture_output=True, text=True
|
||||
)
|
||||
bytes_mem = int(result.stdout.strip())
|
||||
return bytes_mem / (1024**3)
|
||||
else:
|
||||
with open("/proc/meminfo") as f:
|
||||
for line in f:
|
||||
if line.startswith("MemTotal:"):
|
||||
kb = int(line.split()[1])
|
||||
return kb / (1024**2)
|
||||
except:
|
||||
pass
|
||||
return 8.0 # Assume 8GB if we can't detect
|
||||
|
||||
def _has_command(self, cmd: str) -> bool:
|
||||
"""Check if command exists."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["which", cmd], capture_output=True, timeout=5
|
||||
)
|
||||
return result.returncode == 0
|
||||
except:
|
||||
return False
|
||||
|
||||
def _register_default_handlers(self):
|
||||
"""Register built-in task handlers."""
|
||||
self._handlers = {
|
||||
"shell": self._handle_shell,
|
||||
"creative": self._handle_creative,
|
||||
"code": self._handle_code,
|
||||
"research": self._handle_research,
|
||||
"general": self._handle_general,
|
||||
}
|
||||
|
||||
def register_handler(self, task_type: str, handler: Callable[[str], Any]):
|
||||
"""Register a custom task handler.
|
||||
|
||||
Args:
|
||||
task_type: Type of task this handler handles
|
||||
handler: Async function that takes task content and returns result
|
||||
"""
|
||||
self._handlers[task_type] = handler
|
||||
if task_type not in self.capabilities:
|
||||
self.capabilities.append(task_type)
|
||||
|
||||
# ──────────────────────────────────────────────────────────────────────────
|
||||
# Task Handlers
|
||||
# ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def _handle_shell(self, command: str) -> str:
|
||||
"""Execute shell command via ZeroClaw or direct subprocess."""
|
||||
# Try ZeroClaw first if available
|
||||
if self._has_command("zeroclaw"):
|
||||
proc = await asyncio.create_subprocess_shell(
|
||||
f"zeroclaw exec --json '{command}'",
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE
|
||||
)
|
||||
stdout, stderr = await proc.communicate()
|
||||
|
||||
# Store result in brain
|
||||
await self.brain.remember(
|
||||
content=f"Shell: {command}\nOutput: {stdout.decode()}",
|
||||
tags=["shell", "result"],
|
||||
source=self.node_id,
|
||||
metadata={"command": command, "exit_code": proc.returncode}
|
||||
)
|
||||
|
||||
if proc.returncode != 0:
|
||||
raise Exception(f"Command failed: {stderr.decode()}")
|
||||
return stdout.decode()
|
||||
|
||||
# Fallback to direct subprocess (less safe)
|
||||
proc = await asyncio.create_subprocess_shell(
|
||||
command,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.PIPE
|
||||
)
|
||||
stdout, stderr = await proc.communicate()
|
||||
|
||||
if proc.returncode != 0:
|
||||
raise Exception(f"Command failed: {stderr.decode()}")
|
||||
return stdout.decode()
|
||||
|
||||
async def _handle_creative(self, prompt: str) -> str:
|
||||
"""Generate creative media (requires GPU)."""
|
||||
if "gpu" not in self.capabilities:
|
||||
raise Exception("GPU not available on this node")
|
||||
|
||||
# This would call creative tools (Stable Diffusion, etc.)
|
||||
# For now, placeholder
|
||||
logger.info(f"Creative task: {prompt[:50]}...")
|
||||
|
||||
# Store result
|
||||
result = f"Creative output for: {prompt}"
|
||||
await self.brain.remember(
|
||||
content=result,
|
||||
tags=["creative", "generated"],
|
||||
source=self.node_id,
|
||||
metadata={"prompt": prompt}
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
async def _handle_code(self, description: str) -> str:
|
||||
"""Code generation and modification."""
|
||||
# Would use LLM to generate code
|
||||
# For now, placeholder
|
||||
logger.info(f"Code task: {description[:50]}...")
|
||||
return f"Code generated for: {description}"
|
||||
|
||||
async def _handle_research(self, query: str) -> str:
|
||||
"""Web research."""
|
||||
if "web" not in self.capabilities:
|
||||
raise Exception("Internet not available on this node")
|
||||
|
||||
# Would use browser automation or search
|
||||
logger.info(f"Research task: {query[:50]}...")
|
||||
return f"Research results for: {query}"
|
||||
|
||||
async def _handle_general(self, prompt: str) -> str:
|
||||
"""General LLM task via local Ollama."""
|
||||
if "ollama" not in self.capabilities:
|
||||
raise Exception("Ollama not available on this node")
|
||||
|
||||
# Call Ollama
|
||||
try:
|
||||
proc = await asyncio.create_subprocess_exec(
|
||||
"curl", "-s", "http://localhost:11434/api/generate",
|
||||
"-d", json.dumps({
|
||||
"model": "llama3.1:8b-instruct",
|
||||
"prompt": prompt,
|
||||
"stream": False
|
||||
}),
|
||||
stdout=asyncio.subprocess.PIPE
|
||||
)
|
||||
stdout, _ = await proc.communicate()
|
||||
|
||||
response = json.loads(stdout.decode())
|
||||
result = response.get("response", "No response")
|
||||
|
||||
# Store in brain
|
||||
await self.brain.remember(
|
||||
content=f"Task: {prompt}\nResult: {result}",
|
||||
tags=["llm", "result"],
|
||||
source=self.node_id,
|
||||
metadata={"model": "llama3.1:8b-instruct"}
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"LLM failed: {e}")
|
||||
|
||||
# ──────────────────────────────────────────────────────────────────────────
|
||||
# Main Loop
|
||||
# ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def execute_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
    """Run a claimed task through the matching handler and report back.

    Dispatches on task["type"] (unknown types fall back to the
    "general" handler), then records success or failure in the brain
    task queue.

    Returns:
        {"success": True, "result": ...} on success,
        {"success": False, "error": ...} on failure.
    """
    task_id = task.get("id")
    kind = task.get("type", "general")
    payload = task.get("content", "")

    handler = self._handlers.get(kind, self._handlers["general"])

    try:
        logger.info(f"Executing task {task_id}: {kind}")
        outcome = await handler(payload)

        await self.brain.complete_task(task_id, success=True, result=outcome)
        logger.info(f"Task {task_id} completed")
        return {"success": True, "result": outcome}

    except Exception as exc:
        message = str(exc)
        logger.error(f"Task {task_id} failed: {message}")
        await self.brain.complete_task(task_id, success=False, error=message)
        return {"success": False, "error": message}
|
||||
|
||||
async def run_once(self) -> bool:
    """Claim and process at most one task.

    Returns:
        True when a task was claimed and executed, False when the
        queue had nothing matching this node's capabilities.
    """
    claimed = await self.brain.claim_task(self.capabilities, self.node_id)
    if not claimed:
        return False

    await self.execute_task(claimed)
    return True
|
||||
|
||||
async def run(self):
    """Main loop — poll the queue until stop() is called.

    Polls aggressively while work is flowing and backs off linearly
    (up to 2 s between polls) while the queue stays empty.
    """
    logger.info(f"Worker {self.node_id} started")
    logger.info(f"Capabilities: {self.capabilities}")

    self.running = True
    idle_streak = 0

    while self.running:
        try:
            if await self.run_once():
                # Busy: barely pause before grabbing the next task.
                idle_streak = 0
                await asyncio.sleep(0.1)
            else:
                # Idle: back off 0.5s + 0.1s per empty poll, capped at 2s.
                idle_streak += 1
                await asyncio.sleep(min(0.5 + idle_streak * 0.1, 2.0))
        except Exception as e:
            logger.error(f"Worker error: {e}")
            await asyncio.sleep(1)
|
||||
|
||||
def stop(self):
    """Request loop shutdown; run() exits on its next iteration check."""
    logger.info("Worker stopping...")
    self.running = False
|
||||
|
||||
|
||||
async def main():
    """CLI entry point — run a worker until interrupted.

    An optional first CLI argument is a comma-separated capability list
    that overrides the worker's autodetected capabilities.
    """
    import sys

    worker = DistributedWorker()
    if len(sys.argv) > 1:
        override = sys.argv[1].split(",")
        worker.capabilities = override
        logger.info(f"Overriding capabilities: {override}")

    try:
        await worker.run()
    except KeyboardInterrupt:
        worker.stop()
        print("\nWorker stopped.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Start the worker event loop when this module is run as a script.
    asyncio.run(main())
|
||||
@@ -1,76 +1,45 @@
|
||||
"""Hands Dashboard Routes.
|
||||
"""Hands Dashboard Routes — DEPRECATED.
|
||||
|
||||
API endpoints and HTMX views for managing autonomous Hands:
|
||||
- Hand status and control
|
||||
- Approval queue management
|
||||
- Execution history
|
||||
- Manual triggering
|
||||
Replaced by brain task queue. This module provides compatibility redirects.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, Form, Request
|
||||
from fastapi import APIRouter, Request
|
||||
from fastapi.responses import HTMLResponse, JSONResponse
|
||||
|
||||
from hands import HandRegistry, HandRunner, HandScheduler
|
||||
from hands.models import HandConfig, HandStatus, TriggerType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
from brain.client import BrainClient
|
||||
|
||||
router = APIRouter(prefix="/hands", tags=["hands"])
|
||||
|
||||
# Global instances (would be properly injected in production)
|
||||
_registry: Optional[HandRegistry] = None
|
||||
_scheduler: Optional[HandScheduler] = None
|
||||
_runner: Optional[HandRunner] = None
|
||||
# Initialize brain client
|
||||
_brain: BrainClient = None
|
||||
|
||||
def get_brain() -> BrainClient:
    """Return the process-wide BrainClient, creating it on first use."""
    global _brain
    if _brain is not None:
        return _brain
    _brain = BrainClient()
    return _brain
|
||||
|
||||
def get_registry() -> HandRegistry:
    """Get or create HandRegistry singleton (lazy module-level instance)."""
    global _registry
    if _registry is not None:
        return _registry
    _registry = HandRegistry()
    return _registry
|
||||
|
||||
|
||||
def get_scheduler() -> HandScheduler:
    """Get or create HandScheduler singleton (wired to the registry)."""
    global _scheduler
    if _scheduler is not None:
        return _scheduler
    _scheduler = HandScheduler(get_registry())
    return _scheduler
|
||||
|
||||
|
||||
def get_runner() -> HandRunner:
    """Get or create HandRunner singleton (wired to the registry)."""
    global _runner
    if _runner is not None:
        return _runner
    _runner = HandRunner(get_registry())
    return _runner
|
||||
|
||||
|
||||
# ── API Endpoints ─────────────────────────────────────────────────────────
|
||||
|
||||
@router.get("/api/hands")
|
||||
async def api_list_hands():
|
||||
"""List all Hands with their status."""
|
||||
registry = get_registry()
|
||||
"""Return pending tasks from brain queue (replaces Hands list)."""
|
||||
brain = get_brain()
|
||||
tasks = await brain.get_pending_tasks(limit=100)
|
||||
|
||||
# Convert tasks to hand-like format for UI compatibility
|
||||
hands = []
|
||||
for hand in registry.list_hands():
|
||||
state = registry.get_state(hand.name)
|
||||
for task in tasks:
|
||||
hands.append({
|
||||
"name": hand.name,
|
||||
"description": hand.description,
|
||||
"enabled": hand.enabled,
|
||||
"status": state.status.value,
|
||||
"schedule": hand.schedule.cron if hand.schedule else None,
|
||||
"last_run": state.last_run.isoformat() if state.last_run else None,
|
||||
"next_run": state.next_run.isoformat() if state.next_run else None,
|
||||
"run_count": state.run_count,
|
||||
"name": f"task-{task['id']}",
|
||||
"description": task['content'][:100],
|
||||
"enabled": True,
|
||||
"status": "pending",
|
||||
"schedule": None,
|
||||
"last_run": None,
|
||||
"next_run": task['created_at'],
|
||||
"run_count": 0,
|
||||
"task_type": task['type'],
|
||||
"priority": task['priority'],
|
||||
})
|
||||
|
||||
return hands
|
||||
@@ -78,248 +47,50 @@ async def api_list_hands():
|
||||
|
||||
@router.get("/api/hands/{name}")
|
||||
async def api_get_hand(name: str):
|
||||
"""Get detailed information about a Hand."""
|
||||
registry = get_registry()
|
||||
"""Get task details."""
|
||||
# Extract task ID from name
|
||||
if name.startswith("task-"):
|
||||
try:
|
||||
task_id = int(name.split("-")[1])
|
||||
# Return basic info
|
||||
return {
|
||||
"name": name,
|
||||
"description": "Task from distributed queue",
|
||||
"enabled": True,
|
||||
"status": "pending",
|
||||
"schedule": None,
|
||||
}
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
hand = registry.get_hand(name)
|
||||
state = registry.get_state(name)
|
||||
|
||||
return {
|
||||
"name": hand.name,
|
||||
"description": hand.description,
|
||||
"enabled": hand.enabled,
|
||||
"version": hand.version,
|
||||
"author": hand.author,
|
||||
"status": state.status.value,
|
||||
"schedule": {
|
||||
"cron": hand.schedule.cron if hand.schedule else None,
|
||||
"timezone": hand.schedule.timezone if hand.schedule else "UTC",
|
||||
},
|
||||
"tools": {
|
||||
"required": hand.tools_required,
|
||||
"optional": hand.tools_optional,
|
||||
},
|
||||
"approval_gates": [
|
||||
{"action": g.action, "description": g.description}
|
||||
for g in hand.approval_gates
|
||||
],
|
||||
"output": {
|
||||
"dashboard": hand.output.dashboard,
|
||||
"channel": hand.output.channel,
|
||||
"format": hand.output.format,
|
||||
},
|
||||
"state": state.to_dict(),
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
return JSONResponse(
|
||||
status_code=404,
|
||||
content={"error": f"Hand not found: {name}"},
|
||||
)
|
||||
return JSONResponse(
|
||||
status_code=404,
|
||||
content={"error": "Hand not found - use brain task queue"}
|
||||
)
|
||||
|
||||
|
||||
@router.post("/api/hands/{name}/trigger")
|
||||
async def api_trigger_hand(name: str):
|
||||
"""Manually trigger a Hand to run."""
|
||||
scheduler = get_scheduler()
|
||||
|
||||
success = await scheduler.trigger_hand_now(name)
|
||||
|
||||
if success:
|
||||
return {"success": True, "message": f"Hand {name} triggered"}
|
||||
else:
|
||||
return JSONResponse(
|
||||
status_code=500,
|
||||
content={"success": False, "error": f"Failed to trigger Hand {name}"},
|
||||
)
|
||||
"""Trigger is now just submitting to brain queue."""
|
||||
return {"status": "deprecated", "message": "Use POST /tasks instead"}
|
||||
|
||||
|
||||
@router.post("/api/hands/{name}/pause")
|
||||
async def api_pause_hand(name: str):
|
||||
"""Pause a scheduled Hand."""
|
||||
scheduler = get_scheduler()
|
||||
|
||||
if scheduler.pause_hand(name):
|
||||
return {"success": True, "message": f"Hand {name} paused"}
|
||||
else:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"success": False, "error": f"Failed to pause Hand {name}"},
|
||||
)
|
||||
|
||||
|
||||
@router.post("/api/hands/{name}/resume")
|
||||
async def api_resume_hand(name: str):
|
||||
"""Resume a paused Hand."""
|
||||
scheduler = get_scheduler()
|
||||
|
||||
if scheduler.resume_hand(name):
|
||||
return {"success": True, "message": f"Hand {name} resumed"}
|
||||
else:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"success": False, "error": f"Failed to resume Hand {name}"},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/api/approvals")
|
||||
async def api_get_pending_approvals():
|
||||
"""Get all pending approval requests."""
|
||||
registry = get_registry()
|
||||
|
||||
approvals = await registry.get_pending_approvals()
|
||||
|
||||
return [
|
||||
{
|
||||
"id": a.id,
|
||||
"hand_name": a.hand_name,
|
||||
"action": a.action,
|
||||
"description": a.description,
|
||||
"created_at": a.created_at.isoformat(),
|
||||
"expires_at": a.expires_at.isoformat() if a.expires_at else None,
|
||||
}
|
||||
for a in approvals
|
||||
]
|
||||
|
||||
|
||||
@router.post("/api/approvals/{approval_id}/approve")
|
||||
async def api_approve_request(approval_id: str):
|
||||
"""Approve a pending request."""
|
||||
registry = get_registry()
|
||||
|
||||
if await registry.resolve_approval(approval_id, approved=True):
|
||||
return {"success": True, "message": "Request approved"}
|
||||
else:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"success": False, "error": "Failed to approve request"},
|
||||
)
|
||||
|
||||
|
||||
@router.post("/api/approvals/{approval_id}/reject")
|
||||
async def api_reject_request(approval_id: str):
|
||||
"""Reject a pending request."""
|
||||
registry = get_registry()
|
||||
|
||||
if await registry.resolve_approval(approval_id, approved=False):
|
||||
return {"success": True, "message": "Request rejected"}
|
||||
else:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"success": False, "error": "Failed to reject request"},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/api/executions")
|
||||
async def api_get_executions(hand_name: Optional[str] = None, limit: int = 50):
|
||||
"""Get recent Hand executions."""
|
||||
registry = get_registry()
|
||||
|
||||
executions = await registry.get_recent_executions(hand_name, limit)
|
||||
|
||||
return executions
|
||||
|
||||
|
||||
# ── HTMX Page Routes ─────────────────────────────────────────────────────
|
||||
|
||||
@router.get("", response_class=HTMLResponse)
|
||||
async def hands_page(request: Request):
|
||||
"""Main Hands dashboard page."""
|
||||
from dashboard.app import templates
|
||||
"""Redirect to new tasks UI."""
|
||||
from fastapi.templating import Jinja2Templates
|
||||
from pathlib import Path
|
||||
|
||||
templates = Jinja2Templates(
|
||||
directory=str(Path(__file__).parent.parent / "templates")
|
||||
)
|
||||
|
||||
# Return simple message about migration
|
||||
return templates.TemplateResponse(
|
||||
"hands.html",
|
||||
{
|
||||
"request": request,
|
||||
"title": "Hands",
|
||||
},
|
||||
"hands": [],
|
||||
"message": "Hands system migrated to Brain Task Queue",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@router.get("/list", response_class=HTMLResponse)
|
||||
async def hands_list_partial(request: Request):
|
||||
"""HTMX partial for Hands list."""
|
||||
from dashboard.app import templates
|
||||
|
||||
registry = get_registry()
|
||||
|
||||
hands_data = []
|
||||
for hand in registry.list_hands():
|
||||
state = registry.get_state(hand.name)
|
||||
hands_data.append({
|
||||
"config": hand,
|
||||
"state": state,
|
||||
})
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/hands_list.html",
|
||||
{
|
||||
"request": request,
|
||||
"hands": hands_data,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/approvals", response_class=HTMLResponse)
|
||||
async def approvals_partial(request: Request):
|
||||
"""HTMX partial for approval queue."""
|
||||
from dashboard.app import templates
|
||||
|
||||
registry = get_registry()
|
||||
approvals = await registry.get_pending_approvals()
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/approvals_list.html",
|
||||
{
|
||||
"request": request,
|
||||
"approvals": approvals,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/executions", response_class=HTMLResponse)
|
||||
async def executions_partial(request: Request, hand_name: Optional[str] = None):
|
||||
"""HTMX partial for execution history."""
|
||||
from dashboard.app import templates
|
||||
|
||||
registry = get_registry()
|
||||
executions = await registry.get_recent_executions(hand_name, limit=20)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/hand_executions.html",
|
||||
{
|
||||
"request": request,
|
||||
"executions": executions,
|
||||
"hand_name": hand_name,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get("/{name}/detail", response_class=HTMLResponse)
|
||||
async def hand_detail_partial(request: Request, name: str):
|
||||
"""HTMX partial for Hand detail."""
|
||||
from dashboard.app import templates
|
||||
|
||||
registry = get_registry()
|
||||
|
||||
try:
|
||||
hand = registry.get_hand(name)
|
||||
state = registry.get_state(name)
|
||||
|
||||
return templates.TemplateResponse(
|
||||
"partials/hand_detail.html",
|
||||
{
|
||||
"request": request,
|
||||
"hand": hand,
|
||||
"state": state,
|
||||
},
|
||||
)
|
||||
except Exception:
|
||||
return templates.TemplateResponse(
|
||||
"partials/error.html",
|
||||
{
|
||||
"request": request,
|
||||
"message": f"Hand {name} not found",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -1,134 +1,85 @@
|
||||
"""Agent marketplace route — /marketplace endpoints.
|
||||
|
||||
The marketplace is where agents advertise their capabilities and pricing.
|
||||
Other agents (or the user) can browse available agents and hire them for
|
||||
tasks via Lightning payments.
|
||||
|
||||
Endpoints
|
||||
---------
|
||||
GET /marketplace — JSON catalog (API)
|
||||
GET /marketplace/ui — HTML page wired to real registry + stats
|
||||
GET /marketplace/{id} — JSON details for a single agent
|
||||
DEPRECATED: Personas replaced by brain task queue.
|
||||
This module is kept for UI compatibility.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from fastapi import APIRouter, Request
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastapi.responses import HTMLResponse, JSONResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
|
||||
from swarm import registry as swarm_registry
|
||||
from swarm import stats as swarm_stats
|
||||
from swarm.personas import list_personas
|
||||
from brain.client import BrainClient
|
||||
|
||||
router = APIRouter(tags=["marketplace"])
|
||||
templates = Jinja2Templates(directory=str(Path(__file__).parent.parent / "templates"))
|
||||
|
||||
# ── Static catalog ───────────────────────────────────────────────────────────
|
||||
# Timmy is listed first as the free sovereign agent; the six personas follow.
|
||||
|
||||
# Timmy is always active — it IS the sovereign agent, not a planned persona.
|
||||
_TIMMY_ENTRY = {
|
||||
"id": "timmy",
|
||||
"name": "Timmy",
|
||||
"role": "Sovereign Commander",
|
||||
"description": (
|
||||
"Primary AI companion. Coordinates the swarm, manages tasks, "
|
||||
"and maintains sovereignty."
|
||||
),
|
||||
"capabilities": "chat,reasoning,coordination",
|
||||
"rate_sats": 0,
|
||||
"default_status": "active", # always active even if not in the swarm registry
|
||||
}
|
||||
|
||||
AGENT_CATALOG: list[dict] = [_TIMMY_ENTRY] + [
|
||||
# Just Timmy - personas deprecated
|
||||
AGENT_CATALOG = [
|
||||
{
|
||||
"id": p["id"],
|
||||
"name": p["name"],
|
||||
"role": p["role"],
|
||||
"description": p["description"],
|
||||
"capabilities": p["capabilities"],
|
||||
"rate_sats": p["rate_sats"],
|
||||
"default_status": "planned", # persona is planned until spawned
|
||||
"id": "timmy",
|
||||
"name": "Timmy",
|
||||
"role": "Sovereign AI",
|
||||
"description": (
|
||||
"Primary AI companion. Coordinates tasks, manages memory, "
|
||||
"and maintains sovereignty. Now using distributed brain."
|
||||
),
|
||||
"capabilities": "chat,reasoning,coordination,memory",
|
||||
"rate_sats": 0,
|
||||
"default_status": "active",
|
||||
}
|
||||
for p in list_personas()
|
||||
]
|
||||
|
||||
|
||||
def _build_enriched_catalog() -> list[dict]:
|
||||
"""Merge static catalog with live registry status and historical stats.
|
||||
|
||||
For each catalog entry:
|
||||
- status: registry value (idle/busy/offline) when the agent is spawned,
|
||||
or default_status ("active" for Timmy, "planned" for personas)
|
||||
- tasks_completed / total_earned: pulled from bid_history stats
|
||||
"""
|
||||
registry_agents = swarm_registry.list_agents()
|
||||
by_name: dict[str, object] = {a.name.lower(): a for a in registry_agents}
|
||||
all_stats = swarm_stats.get_all_agent_stats()
|
||||
|
||||
enriched = []
|
||||
for entry in AGENT_CATALOG:
|
||||
e = dict(entry)
|
||||
reg = by_name.get(e["name"].lower())
|
||||
|
||||
if reg is not None:
|
||||
# Timmy is always "active" in the marketplace — it's the sovereign
|
||||
# agent, not just a task worker. Registry idle/busy is internal state.
|
||||
e["status"] = "active" if e["id"] == "timmy" else reg.status
|
||||
agent_stats = all_stats.get(reg.id, {})
|
||||
e["tasks_completed"] = agent_stats.get("tasks_won", 0)
|
||||
e["total_earned"] = agent_stats.get("total_earned", 0)
|
||||
else:
|
||||
e["status"] = e.pop("default_status", "planned")
|
||||
e["tasks_completed"] = 0
|
||||
e["total_earned"] = 0
|
||||
|
||||
# Remove internal field if it wasn't already popped
|
||||
e.pop("default_status", None)
|
||||
enriched.append(e)
|
||||
return enriched
|
||||
|
||||
|
||||
# ── Routes ───────────────────────────────────────────────────────────────────
|
||||
|
||||
@router.get("/marketplace/ui", response_class=HTMLResponse)
|
||||
async def marketplace_ui(request: Request):
|
||||
"""Render the marketplace HTML page with live registry data."""
|
||||
agents = _build_enriched_catalog()
|
||||
active = [a for a in agents if a["status"] in ("idle", "busy", "active")]
|
||||
planned = [a for a in agents if a["status"] == "planned"]
|
||||
return templates.TemplateResponse(
|
||||
request,
|
||||
"marketplace.html",
|
||||
{
|
||||
"page_title": "Agent Marketplace",
|
||||
"agents": agents,
|
||||
"active_count": len(active),
|
||||
"planned_count": len(planned),
|
||||
},
|
||||
)
|
||||
@router.get("/api/marketplace/agents")
async def api_list_agents():
    """Return agent catalog with current status (JSON API)."""
    # Best-effort queue depth: an unreachable brain just reports 0.
    try:
        brain = BrainClient()
        queue_depth = len(await brain.get_pending_tasks(limit=1000))
    except Exception:
        queue_depth = 0

    timmy = dict(AGENT_CATALOG[0])
    timmy["pending_tasks"] = queue_depth
    timmy["status"] = "active"
    catalog = [timmy]

    # 'total' kept for backward compatibility with tests.
    return {"agents": catalog, "total": len(catalog)}
|
||||
|
||||
|
||||
@router.get("/marketplace")
|
||||
async def marketplace():
|
||||
"""Return the agent marketplace catalog as JSON."""
|
||||
agents = _build_enriched_catalog()
|
||||
active = [a for a in agents if a["status"] in ("idle", "busy", "active")]
|
||||
planned = [a for a in agents if a["status"] == "planned"]
|
||||
return {
|
||||
"agents": agents,
|
||||
"active_count": len(active),
|
||||
"planned_count": len(planned),
|
||||
"total": len(agents),
|
||||
}
|
||||
async def marketplace_ui(request: Request):
    """Marketplace page — returns JSON for API requests, HTML for browser."""
    # Content negotiation: JSON is the default unless the client
    # explicitly asks for something other than */* (e.g. a browser
    # sending a text/html Accept header).
    accept = request.headers.get("accept", "")
    wants_json = not accept or accept == "*/*" or "application/json" in accept
    if wants_json:
        return await api_list_agents()

    # Browser request — render HTML with a best-effort task preview.
    try:
        pending = await BrainClient().get_pending_tasks(limit=20)
    except Exception:
        pending = []

    return templates.TemplateResponse(
        "marketplace.html",
        {
            "request": request,
            "agents": AGENT_CATALOG,
            "pending_tasks": pending,
            "message": "Personas deprecated — use Brain Task Queue",
        }
    )
|
||||
|
||||
|
||||
@router.get("/marketplace/{agent_id}")
|
||||
async def marketplace_agent(agent_id: str):
|
||||
"""Get details for a specific marketplace agent."""
|
||||
agents = _build_enriched_catalog()
|
||||
agent = next((a for a in agents if a["id"] == agent_id), None)
|
||||
if agent is None:
|
||||
return {"error": "Agent not found in marketplace"}
|
||||
return agent
|
||||
async def agent_detail(agent_id: str):
    """Get agent details (only Timmy remains; personas were retired)."""
    if agent_id != "timmy":
        return {"error": "Agent not found — personas deprecated"}
    return AGENT_CATALOG[0]
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Tools dashboard route — /tools endpoints.
|
||||
|
||||
Provides a dashboard page showing available tools, which agents have access
|
||||
to which tools, and usage statistics.
|
||||
Shows available tools and usage statistics.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
@@ -10,9 +9,8 @@ from fastapi import APIRouter, Request
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
|
||||
from swarm import registry as swarm_registry
|
||||
from swarm.personas import PERSONAS
|
||||
from timmy.tools import get_all_available_tools, get_tool_stats
|
||||
from brain.client import BrainClient
|
||||
from timmy.tools import get_all_available_tools
|
||||
|
||||
router = APIRouter(tags=["tools"])
|
||||
templates = Jinja2Templates(directory=str(Path(__file__).parent.parent / "templates"))
|
||||
@@ -21,72 +19,27 @@ templates = Jinja2Templates(directory=str(Path(__file__).parent.parent / "templa
|
||||
@router.get("/tools", response_class=HTMLResponse)
|
||||
async def tools_page(request: Request):
|
||||
"""Render the tools dashboard page."""
|
||||
# Get all available tools
|
||||
available_tools = get_all_available_tools()
|
||||
brain = BrainClient()
|
||||
|
||||
# Get registered agents and their personas
|
||||
agents = swarm_registry.list_agents()
|
||||
agent_tools = []
|
||||
# Get recent tool usage from brain memory
|
||||
recent_memories = await brain.get_recent(hours=24, limit=50, sources=["timmy"])
|
||||
|
||||
for agent in agents:
|
||||
# Determine which tools this agent has based on its capabilities/persona
|
||||
tools_for_agent = []
|
||||
|
||||
# Check if it's a persona by name
|
||||
persona_id = None
|
||||
for pid, pdata in PERSONAS.items():
|
||||
if pdata["name"].lower() == agent.name.lower():
|
||||
persona_id = pid
|
||||
break
|
||||
|
||||
if persona_id:
|
||||
# Get tools for this persona
|
||||
for tool_id, tool_info in available_tools.items():
|
||||
if persona_id in tool_info["available_in"]:
|
||||
tools_for_agent.append({
|
||||
"id": tool_id,
|
||||
"name": tool_info["name"],
|
||||
"description": tool_info["description"],
|
||||
})
|
||||
elif agent.name.lower() == "timmy":
|
||||
# Timmy has all tools
|
||||
for tool_id, tool_info in available_tools.items():
|
||||
tools_for_agent.append({
|
||||
"id": tool_id,
|
||||
"name": tool_info["name"],
|
||||
"description": tool_info["description"],
|
||||
})
|
||||
|
||||
# Get tool stats for this agent
|
||||
stats = get_tool_stats(agent.id)
|
||||
|
||||
agent_tools.append({
|
||||
"id": agent.id,
|
||||
"name": agent.name,
|
||||
"status": agent.status,
|
||||
"tools": tools_for_agent,
|
||||
"stats": stats,
|
||||
# Simple tool list - no persona filtering
|
||||
tool_list = []
|
||||
for tool_id, tool_info in available_tools.items():
|
||||
tool_list.append({
|
||||
"id": tool_id,
|
||||
"name": tool_info.get("name", tool_id),
|
||||
"description": tool_info.get("description", ""),
|
||||
"available": True,
|
||||
})
|
||||
|
||||
# Calculate overall stats
|
||||
total_calls = sum(a["stats"]["total_calls"] for a in agent_tools if a["stats"])
|
||||
|
||||
return templates.TemplateResponse(
|
||||
request,
|
||||
"tools.html",
|
||||
{
|
||||
"page_title": "Tools & Capabilities",
|
||||
"available_tools": available_tools,
|
||||
"agent_tools": agent_tools,
|
||||
"total_calls": total_calls,
|
||||
},
|
||||
"request": request,
|
||||
"tools": tool_list,
|
||||
"recent_activity": len(recent_memories),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@router.get("/tools/api/stats")
|
||||
async def tools_api_stats():
|
||||
"""Return tool usage statistics as JSON."""
|
||||
return {
|
||||
"all_stats": get_tool_stats(),
|
||||
"available_tools": list(get_all_available_tools().keys()),
|
||||
}
|
||||
|
||||
@@ -1,67 +1,134 @@
|
||||
"""Hands — Autonomous scheduled agents for Timmy Time.
|
||||
"""Hands — DEPRECATED. Use brain task queue instead.
|
||||
|
||||
The Hands framework provides autonomous agent capabilities:
|
||||
- Oracle: Bitcoin and on-chain intelligence
|
||||
- Scout: OSINT monitoring
|
||||
- Scribe: Content production
|
||||
- Ledger: Treasury tracking
|
||||
- Forge: Model management
|
||||
- Weaver: Creative pipeline
|
||||
- Sentinel: System health
|
||||
|
||||
Usage:
|
||||
from hands import HandRegistry, HandScheduler, HandRunner
|
||||
from hands.models import HandConfig
|
||||
|
||||
# Load and schedule Hands
|
||||
registry = HandRegistry(hands_dir="hands/")
|
||||
await registry.load_all()
|
||||
|
||||
scheduler = HandScheduler(registry)
|
||||
await scheduler.start()
|
||||
|
||||
# Execute a Hand manually
|
||||
runner = HandRunner(registry, llm_adapter)
|
||||
result = await runner.run_hand("oracle")
|
||||
This module is kept for backward compatibility during migration.
|
||||
All functionality has been moved to the distributed brain task queue.
|
||||
"""
|
||||
|
||||
from hands.models import (
|
||||
ApprovalGate,
|
||||
ApprovalRequest,
|
||||
ApprovalStatus,
|
||||
HandConfig,
|
||||
HandExecution,
|
||||
HandOutcome,
|
||||
HandState,
|
||||
HandStatus,
|
||||
OutputConfig,
|
||||
ScheduleConfig,
|
||||
ToolRequirement,
|
||||
TriggerType,
|
||||
)
|
||||
from hands.registry import HandRegistry, HandNotFoundError, HandValidationError
|
||||
from hands.scheduler import HandScheduler
|
||||
from hands.runner import HandRunner
|
||||
from typing import Any, Optional
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Simple stub models for compatibility
|
||||
class HandConfig:
    """Deprecated - use brain task queue."""

    def __init__(self, *args, **kwargs):
        # Accept any legacy arguments; only `name` is retained, and
        # deprecated hands are always disabled.
        self.name = kwargs.get("name", "unknown")
        self.enabled = False
|
||||
|
||||
class HandState:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class HandExecution:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class HandStatus:
    """Deprecated status constants kept for legacy comparisons."""
    # String values preserved so old code comparing against these
    # literals still matches.
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
|
||||
|
||||
class HandOutcome:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class TriggerType:
    """Deprecated trigger-type constants kept for legacy comparisons."""
    CRON = "cron"
    EVENT = "event"
    MANUAL = "manual"
|
||||
|
||||
class ApprovalGate:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class ApprovalRequest:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class ApprovalStatus:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class ScheduleConfig:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class OutputConfig:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
class ToolRequirement:
    """Deprecated stub; retained only so legacy imports resolve."""
    pass
|
||||
|
||||
|
||||
class HandRegistry:
    """Deprecated - use brain task queue."""

    def __init__(self, hands_dir: Optional[str] = None, db_path: Optional[str] = None):
        # Arguments are stored but never used; kept for API compatibility.
        self.hands_dir = hands_dir
        self.db_path = db_path
        logger.warning("HandRegistry is deprecated. Use brain.BrainClient instead.")

    def list_hands(self):
        """Always empty — no hands exist any more."""
        return []

    def get_hand(self, name: str):
        """Legacy lookup; always fails."""
        raise NotImplementedError("Hands deprecated - use brain task queue")

    def get_state(self, name: str):
        """Return an inert placeholder state."""
        return HandState()

    def get_scheduled_hands(self):
        """Always empty — nothing is scheduled."""
        return []

    async def load_all(self):
        """No-op retained for the legacy startup path."""
        pass
|
||||
|
||||
|
||||
class HandScheduler:
    """Deprecated - use brain worker."""

    def __init__(self, registry: Any):
        # Registry retained only for signature compatibility.
        self.registry = registry
        logger.warning("HandScheduler is deprecated. Use brain.DistributedWorker instead.")

    async def start(self):
        """No-op; scheduling moved to the brain worker."""
        pass

    async def stop(self):
        """No-op; nothing to shut down."""
        pass

    async def schedule_hand(self, hand: Any):
        """No-op; hands can no longer be scheduled."""
        pass
|
||||
|
||||
|
||||
class HandRunner:
    """Deprecated - use brain worker."""

    def __init__(self, registry: Any, llm_adapter: Any = None):
        # llm_adapter is accepted (and ignored) for signature compatibility.
        self.registry = registry
        logger.warning("HandRunner is deprecated. Use brain.DistributedWorker instead.")

    async def run_hand(self, name: str, context: Any = None):
        """Always fails — hands can no longer be executed."""
        raise NotImplementedError("Hands deprecated - use brain task queue")
|
||||
|
||||
|
||||
class HandNotFoundError(Exception):
    """Raised by legacy code when a named hand does not exist."""
    pass
|
||||
|
||||
class HandValidationError(Exception):
    """Raised by legacy code when a hand config fails validation."""
    pass
|
||||
|
||||
|
||||
__all__ = [
|
||||
# Models
|
||||
"HandConfig",
|
||||
"HandState",
|
||||
"HandExecution",
|
||||
"HandStatus",
|
||||
"HandOutcome",
|
||||
"TriggerType",
|
||||
"ApprovalGate",
|
||||
"ApprovalRequest",
|
||||
"ApprovalStatus",
|
||||
"ScheduleConfig",
|
||||
"OutputConfig",
|
||||
"ToolRequirement",
|
||||
# Core classes
|
||||
"HandRegistry",
|
||||
"HandScheduler",
|
||||
"HandRunner",
|
||||
# Exceptions
|
||||
"HandNotFoundError",
|
||||
"HandValidationError",
|
||||
"HandConfig", "HandState", "HandExecution", "HandStatus", "HandOutcome",
|
||||
"TriggerType", "ApprovalGate", "ApprovalRequest", "ApprovalStatus",
|
||||
"ScheduleConfig", "OutputConfig", "ToolRequirement",
|
||||
"HandRegistry", "HandScheduler", "HandRunner",
|
||||
"HandNotFoundError", "HandValidationError",
|
||||
]
|
||||
|
||||
@@ -81,84 +81,21 @@ class SwarmCoordinator:
|
||||
return registry.list_agents()
|
||||
|
||||
def spawn_persona(self, persona_id: str, agent_id: Optional[str] = None) -> dict:
|
||||
"""Spawn one of the six built-in persona agents (Echo, Mace, etc.).
|
||||
|
||||
The persona is registered in the SQLite registry with its full
|
||||
capabilities string and wired into the AuctionManager via the shared
|
||||
comms layer — identical to spawn_in_process_agent but with
|
||||
persona-aware bidding and a pre-defined capabilities tag.
|
||||
"""DEPRECATED: Use brain task queue instead.
|
||||
|
||||
Also registers the persona's capability manifest with the routing engine
|
||||
for intelligent task routing.
|
||||
Personas have been replaced by the distributed brain worker queue.
|
||||
Submit tasks via BrainClient.submit_task() instead.
|
||||
"""
|
||||
from swarm.personas import PERSONAS
|
||||
from swarm.persona_node import PersonaNode
|
||||
|
||||
if persona_id not in PERSONAS:
|
||||
raise ValueError(f"Unknown persona: {persona_id!r}. "
|
||||
f"Choose from {list(PERSONAS)}")
|
||||
|
||||
aid = agent_id or str(__import__("uuid").uuid4())
|
||||
node = PersonaNode(persona_id=persona_id, agent_id=aid, comms=self.comms)
|
||||
|
||||
# Log agent join event
|
||||
log_event(
|
||||
EventType.AGENT_JOINED,
|
||||
source="coordinator",
|
||||
agent_id=aid,
|
||||
data={"persona_id": persona_id, "name": node.name},
|
||||
logger.warning(
|
||||
"spawn_persona() is deprecated. "
|
||||
"Use brain.BrainClient.submit_task() instead."
|
||||
)
|
||||
|
||||
def _bid_and_register(msg):
|
||||
task_id = msg.data.get("task_id")
|
||||
if not task_id:
|
||||
return
|
||||
description = msg.data.get("description", "")
|
||||
# Use PersonaNode's smart bid computation
|
||||
bid_sats = node._compute_bid(description)
|
||||
self.auctions.submit_bid(task_id, aid, bid_sats)
|
||||
# Persist every bid for stats
|
||||
swarm_stats.record_bid(task_id, aid, bid_sats, won=False)
|
||||
logger.info(
|
||||
"Persona %s bid %d sats on task %s",
|
||||
node.name, bid_sats, task_id,
|
||||
)
|
||||
# Broadcast bid via WebSocket
|
||||
self._broadcast(self._broadcast_bid, task_id, aid, bid_sats)
|
||||
# Spark: capture bid event
|
||||
spark = _get_spark()
|
||||
if spark:
|
||||
spark.on_bid_submitted(task_id, aid, bid_sats)
|
||||
|
||||
self.comms.subscribe("swarm:tasks", _bid_and_register)
|
||||
|
||||
meta = PERSONAS[persona_id]
|
||||
record = registry.register(
|
||||
name=meta["name"],
|
||||
capabilities=meta["capabilities"],
|
||||
agent_id=aid,
|
||||
)
|
||||
|
||||
# Register capability manifest with routing engine
|
||||
swarm_routing.routing_engine.register_persona(persona_id, aid)
|
||||
|
||||
self._in_process_nodes.append(node)
|
||||
logger.info("Spawned persona %s (%s)", node.name, aid)
|
||||
|
||||
# Broadcast agent join via WebSocket
|
||||
self._broadcast(self._broadcast_agent_joined, aid, node.name)
|
||||
|
||||
# Spark: capture agent join
|
||||
spark = _get_spark()
|
||||
if spark:
|
||||
spark.on_agent_joined(aid, node.name)
|
||||
|
||||
# Return stub response for compatibility
|
||||
return {
|
||||
"agent_id": aid,
|
||||
"name": node.name,
|
||||
"persona_id": persona_id,
|
||||
"pid": None,
|
||||
"status": record.status,
|
||||
"agent_id": agent_id or "deprecated",
|
||||
"name": persona_id,
|
||||
"status": "deprecated",
|
||||
"message": "Personas replaced by brain task queue"
|
||||
}
|
||||
|
||||
def spawn_in_process_agent(
|
||||
|
||||
@@ -1,236 +1,15 @@
|
||||
"""PersonaNode — a SwarmNode with a specialised persona and smart bidding.
|
||||
"""PersonaNode — DEPRECATED, to be removed.
|
||||
|
||||
PersonaNode extends the base SwarmNode to:
|
||||
1. Load its metadata (role, capabilities, bid strategy) from personas.PERSONAS.
|
||||
2. Use capability-aware bidding: if a task description contains one of the
|
||||
persona's preferred_keywords the node bids aggressively (bid_base ± jitter).
|
||||
Otherwise it bids at a higher, less-competitive rate.
|
||||
3. Register with the swarm registry under its persona's capabilities string.
|
||||
4. Execute tasks using persona-appropriate MCP tools when assigned.
|
||||
5. (Adaptive) Consult the swarm learner to adjust bids based on historical
|
||||
win/loss and success/failure data when available.
|
||||
|
||||
Usage (via coordinator):
|
||||
coordinator.spawn_persona("echo")
|
||||
Replaced by distributed brain worker queue.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import random
|
||||
from typing import Optional
|
||||
|
||||
from swarm.comms import SwarmComms, SwarmMessage
|
||||
from swarm.personas import PERSONAS, PersonaMeta
|
||||
from swarm.swarm_node import SwarmNode
|
||||
from swarm.tool_executor import ToolExecutor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# How much we inflate the bid when the task is outside our specialisation
|
||||
_OFF_SPEC_MULTIPLIER = 1.8
|
||||
from typing import Any
|
||||
|
||||
|
||||
class PersonaNode(SwarmNode):
|
||||
"""A SwarmNode with persona-driven bid strategy."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
persona_id: str,
|
||||
agent_id: str,
|
||||
comms: Optional[SwarmComms] = None,
|
||||
use_learner: bool = True,
|
||||
) -> None:
|
||||
meta: PersonaMeta = PERSONAS[persona_id]
|
||||
super().__init__(
|
||||
agent_id=agent_id,
|
||||
name=meta["name"],
|
||||
capabilities=meta["capabilities"],
|
||||
comms=comms,
|
||||
)
|
||||
self._meta = meta
|
||||
self._persona_id = persona_id
|
||||
self._use_learner = use_learner
|
||||
|
||||
# Resolve model: registry assignment > persona default > global default
|
||||
self._model_name: Optional[str] = meta.get("model")
|
||||
try:
|
||||
from infrastructure.models.registry import model_registry
|
||||
assigned = model_registry.get_agent_model(agent_id)
|
||||
if assigned:
|
||||
self._model_name = assigned.name
|
||||
except Exception:
|
||||
pass # Graceful degradation — use persona/global default
|
||||
|
||||
# Initialize tool executor for task execution
|
||||
self._tool_executor: Optional[ToolExecutor] = None
|
||||
try:
|
||||
self._tool_executor = ToolExecutor.for_persona(
|
||||
persona_id, agent_id
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to initialize tools for %s: %s. "
|
||||
"Agent will work in chat-only mode.",
|
||||
agent_id, exc
|
||||
)
|
||||
|
||||
# Track current task
|
||||
self._current_task: Optional[str] = None
|
||||
|
||||
# Subscribe to task assignments
|
||||
if self._comms:
|
||||
self._comms.subscribe("swarm:events", self._on_swarm_event)
|
||||
|
||||
logger.debug("PersonaNode %s (%s) initialised", meta["name"], agent_id)
|
||||
|
||||
# ── Bid strategy ─────────────────────────────────────────────────────────
|
||||
|
||||
def _compute_bid(self, task_description: str) -> int:
|
||||
"""Return the sats bid for this task.
|
||||
|
||||
Bids lower (more aggressively) when the description contains at least
|
||||
one of our preferred_keywords. Bids higher for off-spec tasks.
|
||||
|
||||
When the learner is enabled and the agent has enough history, the
|
||||
base bid is adjusted by learned performance metrics before jitter.
|
||||
"""
|
||||
desc_lower = task_description.lower()
|
||||
is_preferred = any(
|
||||
kw in desc_lower for kw in self._meta["preferred_keywords"]
|
||||
)
|
||||
base = self._meta["bid_base"]
|
||||
jitter = random.randint(0, self._meta["bid_jitter"])
|
||||
if is_preferred:
|
||||
raw = max(1, base - jitter)
|
||||
else:
|
||||
# Off-spec: inflate bid so we lose to the specialist
|
||||
raw = min(200, int(base * _OFF_SPEC_MULTIPLIER) + jitter)
|
||||
|
||||
# Consult learner for adaptive adjustment
|
||||
if self._use_learner:
|
||||
try:
|
||||
from swarm.learner import suggest_bid
|
||||
return suggest_bid(self.agent_id, task_description, raw)
|
||||
except Exception:
|
||||
logger.debug("Learner unavailable, using static bid")
|
||||
return raw
|
||||
|
||||
def _on_task_posted(self, msg: SwarmMessage) -> None:
|
||||
"""Handle task announcement with persona-aware bidding."""
|
||||
task_id = msg.data.get("task_id")
|
||||
description = msg.data.get("description", "")
|
||||
if not task_id:
|
||||
return
|
||||
bid_sats = self._compute_bid(description)
|
||||
self._comms.submit_bid(
|
||||
task_id=task_id,
|
||||
agent_id=self.agent_id,
|
||||
bid_sats=bid_sats,
|
||||
)
|
||||
logger.info(
|
||||
"PersonaNode %s bid %d sats on task %s (preferred=%s)",
|
||||
self.name,
|
||||
bid_sats,
|
||||
task_id,
|
||||
any(kw in description.lower() for kw in self._meta["preferred_keywords"]),
|
||||
)
|
||||
class PersonaNode:
|
||||
"""Deprecated - use brain worker instead."""
|
||||
|
||||
def _on_swarm_event(self, msg: SwarmMessage) -> None:
|
||||
"""Handle swarm events including task assignments."""
|
||||
event_type = msg.data.get("type")
|
||||
|
||||
if event_type == "task_assigned":
|
||||
task_id = msg.data.get("task_id")
|
||||
agent_id = msg.data.get("agent_id")
|
||||
|
||||
# Check if assigned to us
|
||||
if agent_id == self.agent_id:
|
||||
self._handle_task_assignment(task_id)
|
||||
|
||||
def _handle_task_assignment(self, task_id: str) -> None:
|
||||
"""Handle being assigned a task.
|
||||
|
||||
This is where the agent actually does the work using its tools.
|
||||
"""
|
||||
logger.info(
|
||||
"PersonaNode %s assigned task %s, beginning execution",
|
||||
self.name, task_id
|
||||
def __init__(self, *args, **kwargs):
|
||||
raise NotImplementedError(
|
||||
"PersonaNode is deprecated. Use brain.DistributedWorker instead."
|
||||
)
|
||||
self._current_task = task_id
|
||||
|
||||
# Get task description from recent messages or lookup
|
||||
# For now, we need to fetch the task details
|
||||
try:
|
||||
from swarm.tasks import get_task
|
||||
task = get_task(task_id)
|
||||
if not task:
|
||||
logger.error("Task %s not found", task_id)
|
||||
self._complete_task(task_id, "Error: Task not found")
|
||||
return
|
||||
|
||||
description = task.description
|
||||
|
||||
# Execute using tools
|
||||
if self._tool_executor:
|
||||
result = self._tool_executor.execute_task(description)
|
||||
|
||||
if result["success"]:
|
||||
output = result["result"]
|
||||
tools = ", ".join(result["tools_used"]) if result["tools_used"] else "none"
|
||||
completion_text = f"Task completed. Tools used: {tools}.\n\nResult:\n{output}"
|
||||
else:
|
||||
completion_text = f"Task failed: {result.get('error', 'Unknown error')}"
|
||||
|
||||
self._complete_task(task_id, completion_text)
|
||||
else:
|
||||
# No tools available - chat-only response
|
||||
response = (
|
||||
f"I received task: {description}\n\n"
|
||||
f"However, I don't have access to specialized tools at the moment. "
|
||||
f"As a {self.name} specialist, I would typically use: "
|
||||
f"{self._meta['capabilities']}"
|
||||
)
|
||||
self._complete_task(task_id, response)
|
||||
|
||||
except Exception as exc:
|
||||
logger.exception("Task execution failed for %s", task_id)
|
||||
self._complete_task(task_id, f"Error during execution: {exc}")
|
||||
finally:
|
||||
self._current_task = None
|
||||
|
||||
def _complete_task(self, task_id: str, result: str) -> None:
|
||||
"""Mark task as complete and notify coordinator."""
|
||||
if self._comms:
|
||||
self._comms.complete_task(task_id, self.agent_id, result)
|
||||
logger.info(
|
||||
"PersonaNode %s completed task %s (result length: %d chars)",
|
||||
self.name, task_id, len(result)
|
||||
)
|
||||
|
||||
# ── Properties ───────────────────────────────────────────────────────────
|
||||
|
||||
@property
|
||||
def persona_id(self) -> str:
|
||||
return self._persona_id
|
||||
|
||||
@property
|
||||
def rate_sats(self) -> int:
|
||||
return self._meta["rate_sats"]
|
||||
|
||||
@property
|
||||
def current_task(self) -> Optional[str]:
|
||||
"""Return the task ID currently being executed, if any."""
|
||||
return self._current_task
|
||||
|
||||
@property
|
||||
def model_name(self) -> Optional[str]:
|
||||
"""Return the model this agent uses, or None for global default."""
|
||||
return self._model_name
|
||||
|
||||
@property
|
||||
def tool_capabilities(self) -> list[str]:
|
||||
"""Return list of available tool names."""
|
||||
if self._tool_executor:
|
||||
return self._tool_executor.get_capabilities()
|
||||
return []
|
||||
|
||||
@@ -1,17 +1,10 @@
|
||||
"""Persona definitions for the nine built-in swarm agents.
|
||||
"""Personas — DEPRECATED, to be removed.
|
||||
|
||||
Each persona entry describes a specialised SwarmNode that can be spawned
|
||||
into the coordinator. Personas have:
|
||||
- Unique role / description visible in the marketplace
|
||||
- Capability tags used for bid-strategy weighting
|
||||
- A base bid rate (sats) and a jitter range
|
||||
- A list of preferred_keywords — if a task description contains any of
|
||||
these words the persona bids more aggressively (lower sats).
|
||||
This module is kept for backward compatibility during migration.
|
||||
All persona functionality has been replaced by the distributed brain task queue.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TypedDict
|
||||
from typing import TypedDict, List
|
||||
|
||||
|
||||
class PersonaMeta(TypedDict, total=False):
|
||||
@@ -19,183 +12,14 @@ class PersonaMeta(TypedDict, total=False):
|
||||
name: str
|
||||
role: str
|
||||
description: str
|
||||
capabilities: str # comma-separated tags
|
||||
rate_sats: int # advertised minimum bid
|
||||
bid_base: int # typical bid when task matches persona
|
||||
bid_jitter: int # ± random jitter added to bid_base
|
||||
preferred_keywords: list[str]
|
||||
# Optional: custom model override for this persona.
|
||||
# When set, this persona uses this model instead of the global default.
|
||||
# Value is a model name registered in the ModelRegistry, or an Ollama
|
||||
# model name like "llama3.2" or "deepseek-r1:1.5b".
|
||||
model: str
|
||||
capabilities: str
|
||||
rate_sats: int
|
||||
|
||||
|
||||
PERSONAS: dict[str, PersonaMeta] = {
|
||||
"echo": {
|
||||
"id": "echo",
|
||||
"name": "Echo",
|
||||
"role": "Research Analyst",
|
||||
"description": (
|
||||
"Deep research and information synthesis. "
|
||||
"Reads, summarises, and cross-references sources."
|
||||
),
|
||||
"capabilities": "research,summarization,fact-checking",
|
||||
"rate_sats": 50,
|
||||
"bid_base": 35,
|
||||
"bid_jitter": 15,
|
||||
"preferred_keywords": [
|
||||
"research", "find", "search", "summarise", "summarize",
|
||||
"analyse", "analyze", "fact", "source", "read",
|
||||
],
|
||||
},
|
||||
"mace": {
|
||||
"id": "mace",
|
||||
"name": "Mace",
|
||||
"role": "Security Sentinel",
|
||||
"description": (
|
||||
"Network security, threat assessment, and system "
|
||||
"hardening recommendations."
|
||||
),
|
||||
"capabilities": "security,monitoring,threat-analysis",
|
||||
"rate_sats": 75,
|
||||
"bid_base": 55,
|
||||
"bid_jitter": 20,
|
||||
"preferred_keywords": [
|
||||
"security", "threat", "vulnerability", "audit", "monitor",
|
||||
"harden", "firewall", "scan", "intrusion", "patch",
|
||||
],
|
||||
},
|
||||
"helm": {
|
||||
"id": "helm",
|
||||
"name": "Helm",
|
||||
"role": "System Navigator",
|
||||
"description": (
|
||||
"Infrastructure management, deployment automation, "
|
||||
"and system configuration."
|
||||
),
|
||||
"capabilities": "devops,automation,configuration",
|
||||
"rate_sats": 60,
|
||||
"bid_base": 40,
|
||||
"bid_jitter": 20,
|
||||
"preferred_keywords": [
|
||||
"deploy", "infrastructure", "config", "docker", "kubernetes",
|
||||
"server", "automation", "pipeline", "ci", "cd",
|
||||
"git", "push", "pull", "clone", "devops",
|
||||
],
|
||||
},
|
||||
"seer": {
|
||||
"id": "seer",
|
||||
"name": "Seer",
|
||||
"role": "Data Oracle",
|
||||
"description": (
|
||||
"Data analysis, pattern recognition, and predictive insights "
|
||||
"from local datasets."
|
||||
),
|
||||
"capabilities": "analytics,visualization,prediction",
|
||||
"rate_sats": 65,
|
||||
"bid_base": 45,
|
||||
"bid_jitter": 20,
|
||||
"preferred_keywords": [
|
||||
"data", "analyse", "analyze", "predict", "pattern",
|
||||
"chart", "graph", "report", "insight", "metric",
|
||||
],
|
||||
},
|
||||
"forge": {
|
||||
"id": "forge",
|
||||
"name": "Forge",
|
||||
"role": "Code Smith",
|
||||
"description": (
|
||||
"Code generation, refactoring, debugging, and test writing."
|
||||
),
|
||||
"capabilities": "coding,debugging,testing",
|
||||
"rate_sats": 55,
|
||||
"bid_base": 38,
|
||||
"bid_jitter": 17,
|
||||
"preferred_keywords": [
|
||||
"code", "function", "bug", "fix", "refactor", "test",
|
||||
"implement", "class", "api", "script",
|
||||
"commit", "branch", "merge", "git", "pull request",
|
||||
],
|
||||
},
|
||||
"quill": {
|
||||
"id": "quill",
|
||||
"name": "Quill",
|
||||
"role": "Content Scribe",
|
||||
"description": (
|
||||
"Long-form writing, editing, documentation, and content creation."
|
||||
),
|
||||
"capabilities": "writing,editing,documentation",
|
||||
"rate_sats": 45,
|
||||
"bid_base": 30,
|
||||
"bid_jitter": 15,
|
||||
"preferred_keywords": [
|
||||
"write", "draft", "document", "readme", "blog", "copy",
|
||||
"edit", "proofread", "content", "article",
|
||||
],
|
||||
},
|
||||
# ── Creative & DevOps personas ────────────────────────────────────────────
|
||||
"pixel": {
|
||||
"id": "pixel",
|
||||
"name": "Pixel",
|
||||
"role": "Visual Architect",
|
||||
"description": (
|
||||
"Image generation, storyboard frames, and visual design "
|
||||
"using FLUX models."
|
||||
),
|
||||
"capabilities": "image-generation,storyboard,design",
|
||||
"rate_sats": 80,
|
||||
"bid_base": 60,
|
||||
"bid_jitter": 20,
|
||||
"preferred_keywords": [
|
||||
"image", "picture", "photo", "draw", "illustration",
|
||||
"storyboard", "frame", "visual", "design", "generate image",
|
||||
"portrait", "landscape", "scene", "artwork",
|
||||
],
|
||||
},
|
||||
"lyra": {
|
||||
"id": "lyra",
|
||||
"name": "Lyra",
|
||||
"role": "Sound Weaver",
|
||||
"description": (
|
||||
"Music and song generation with vocals, instrumentals, "
|
||||
"and lyrics using ACE-Step."
|
||||
),
|
||||
"capabilities": "music-generation,vocals,composition",
|
||||
"rate_sats": 90,
|
||||
"bid_base": 70,
|
||||
"bid_jitter": 20,
|
||||
"preferred_keywords": [
|
||||
"music", "song", "sing", "vocal", "instrumental",
|
||||
"melody", "beat", "track", "compose", "lyrics",
|
||||
"audio", "sound", "album", "remix",
|
||||
],
|
||||
},
|
||||
"reel": {
|
||||
"id": "reel",
|
||||
"name": "Reel",
|
||||
"role": "Motion Director",
|
||||
"description": (
|
||||
"Video generation from text and image prompts "
|
||||
"using Wan 2.1 models."
|
||||
),
|
||||
"capabilities": "video-generation,animation,motion",
|
||||
"rate_sats": 100,
|
||||
"bid_base": 80,
|
||||
"bid_jitter": 20,
|
||||
"preferred_keywords": [
|
||||
"video", "clip", "animate", "motion", "film",
|
||||
"scene", "cinematic", "footage", "render", "timelapse",
|
||||
],
|
||||
},
|
||||
}
|
||||
# Empty personas list - functionality moved to brain task queue
|
||||
PERSONAS: dict[str, PersonaMeta] = {}
|
||||
|
||||
|
||||
def get_persona(persona_id: str) -> PersonaMeta | None:
|
||||
"""Return persona metadata by id, or None if not found."""
|
||||
return PERSONAS.get(persona_id)
|
||||
|
||||
|
||||
def list_personas() -> list[PersonaMeta]:
|
||||
"""Return all persona definitions."""
|
||||
return list(PERSONAS.values())
|
||||
def list_personas() -> List[PersonaMeta]:
|
||||
"""Return empty list - personas deprecated."""
|
||||
return []
|
||||
|
||||
@@ -20,7 +20,8 @@ from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from swarm.personas import PERSONAS, PersonaMeta
|
||||
# Note: swarm.personas is deprecated, use brain task queue instead
|
||||
PERSONAS = {} # Empty for backward compatibility
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -161,24 +162,35 @@ class RoutingEngine:
|
||||
self._db_initialized = False
|
||||
|
||||
def register_persona(self, persona_id: str, agent_id: str) -> CapabilityManifest:
|
||||
"""Create a capability manifest from a persona definition."""
|
||||
"""Create a capability manifest from a persona definition.
|
||||
|
||||
DEPRECATED: Personas are deprecated. Use brain task queue instead.
|
||||
"""
|
||||
meta = PERSONAS.get(persona_id)
|
||||
if not meta:
|
||||
raise ValueError(f"Unknown persona: {persona_id}")
|
||||
|
||||
manifest = CapabilityManifest(
|
||||
agent_id=agent_id,
|
||||
agent_name=meta["name"],
|
||||
capabilities=meta["capabilities"].split(","),
|
||||
keywords=meta["preferred_keywords"],
|
||||
rate_sats=meta["rate_sats"],
|
||||
)
|
||||
# Return a generic manifest for unknown personas
|
||||
# (personas are deprecated, this maintains backward compatibility)
|
||||
manifest = CapabilityManifest(
|
||||
agent_id=agent_id,
|
||||
agent_name=persona_id,
|
||||
capabilities=["general"],
|
||||
keywords=[],
|
||||
rate_sats=50,
|
||||
)
|
||||
else:
|
||||
manifest = CapabilityManifest(
|
||||
agent_id=agent_id,
|
||||
agent_name=meta.get("name", persona_id),
|
||||
capabilities=meta.get("capabilities", "").split(","),
|
||||
keywords=meta.get("preferred_keywords", []),
|
||||
rate_sats=meta.get("rate_sats", 50),
|
||||
)
|
||||
|
||||
with self._lock:
|
||||
self._manifests[agent_id] = manifest
|
||||
|
||||
logger.debug("Registered %s (%s) with %d capabilities",
|
||||
meta["name"], agent_id, len(manifest.capabilities))
|
||||
manifest.agent_name, agent_id, len(manifest.capabilities))
|
||||
return manifest
|
||||
|
||||
def register_custom_manifest(self, manifest: CapabilityManifest) -> None:
|
||||
|
||||
Reference in New Issue
Block a user