feat: one-click cloud deployment — Caddy HTTPS, Ollama, systemd, cloud-init

Add complete production deployment stack so Timmy can be deployed to any
cloud provider (DigitalOcean, AWS, Hetzner, etc.) with a single command.

New files:
- docker-compose.prod.yml: production stack (Caddy auto-HTTPS, Ollama LLM,
  Dashboard, Timmy agent, Watchtower auto-updates)
- deploy/Caddyfile: reverse proxy with security headers and WebSocket support
- deploy/setup.sh: interactive one-click setup script for any Ubuntu/Debian server
- deploy/cloud-init.yaml: paste as User Data when creating a cloud VM
- deploy/timmy.service: systemd unit for auto-start on boot
- deploy/digitalocean/create-droplet.sh: create a DO droplet via doctl CLI

Updated:
- Dockerfile: non-root user, healthcheck, missing deps (GitPython, moviepy, redis)
- Makefile: cloud-deploy, cloud-up/down/logs/status/update/scale targets
- .env.example: DOMAIN setting for HTTPS
- .dockerignore: exclude deploy configs from image

https://claude.ai/code/session_018CduUZoEJzFynBwMsxaP8T
This commit is contained in:
Claude
2026-02-24 21:22:56 +00:00
parent 7018a756b3
commit b7cfb3b097
10 changed files with 815 additions and 4 deletions

View File

@@ -33,5 +33,10 @@ tests/
docs/
*.md
# ── Deploy configs (not needed inside image) ──────────────────────────────────
deploy/
docker-compose*.yml
Makefile
# ── macOS ─────────────────────────────────────────────────────────────────────
.DS_Store

View File

@@ -1,9 +1,17 @@
# Timmy Time — Mission Control
# Copy this file to .env and uncomment lines you want to override.
# .env is gitignored and never committed.
#
# For cloud deployment, deploy/setup.sh generates this automatically.
# ── Cloud / Production ──────────────────────────────────────────────────────
# Your domain for automatic HTTPS via Let's Encrypt.
# Set to your actual domain (e.g., timmy.example.com) for HTTPS.
# Leave as "localhost" for IP-only HTTP access.
# DOMAIN=localhost
# Ollama host (default: http://localhost:11434)
# Override if Ollama is running on another machine or port.
# In production (docker-compose.prod.yml), this is set to http://ollama:11434 automatically.
# OLLAMA_URL=http://localhost:11434
# LLM model to use via Ollama (default: llama3.2)

View File

@@ -11,7 +11,7 @@
# timmy-time:latest \
# python -m swarm.agent_runner --agent-id w1 --name Worker-1
FROM python:3.12-slim
FROM python:3.12-slim AS base
# ── System deps ──────────────────────────────────────────────────────────────
RUN apt-get update && apt-get install -y --no-install-recommends \
@@ -38,7 +38,10 @@ RUN pip install --no-cache-dir \
"agno[sqlite]>=1.4.0" \
"ollama>=0.3.0" \
"openai>=1.0.0" \
"python-telegram-bot>=21.0"
"python-telegram-bot>=21.0" \
"GitPython>=3.1.40" \
"moviepy>=2.0.0" \
"redis>=5.0.0"
# ── Application source ───────────────────────────────────────────────────────
COPY src/ ./src/
@@ -47,6 +50,11 @@ COPY static/ ./static/
# Create data directory (mounted as a volume in production)
RUN mkdir -p /app/data
# ── Non-root user for production ─────────────────────────────────────────────
RUN groupadd -r timmy && useradd -r -g timmy -d /app -s /sbin/nologin timmy \
&& chown -R timmy:timmy /app
USER timmy
# ── Environment ──────────────────────────────────────────────────────────────
ENV PYTHONPATH=/app/src
ENV PYTHONUNBUFFERED=1
@@ -54,5 +62,9 @@ ENV PYTHONDONTWRITEBYTECODE=1
EXPOSE 8000
# ── Healthcheck ──────────────────────────────────────────────────────────────
HEALTHCHECK --interval=30s --timeout=5s --start-period=15s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1
# ── Default: run the dashboard ───────────────────────────────────────────────
CMD ["uvicorn", "dashboard.app:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -1,5 +1,6 @@
.PHONY: install install-bigbrain dev test test-cov test-cov-html watch lint clean help \
docker-build docker-up docker-down docker-agent docker-logs docker-shell
docker-build docker-up docker-down docker-agent docker-logs docker-shell \
cloud-deploy cloud-up cloud-down cloud-logs cloud-status cloud-update
VENV := .venv
PYTHON := $(VENV)/bin/python
@@ -95,6 +96,45 @@ docker-logs:
docker-shell:
docker compose exec dashboard bash
# ── Cloud Deploy ─────────────────────────────────────────────────────────────
# One-click production deployment (run on your cloud server)
cloud-deploy:
@bash deploy/setup.sh
# Start the production stack (Caddy + Ollama + Dashboard + Timmy)
cloud-up:
docker compose -f docker-compose.prod.yml up -d
# Stop the production stack
cloud-down:
docker compose -f docker-compose.prod.yml down
# Tail production logs
cloud-logs:
docker compose -f docker-compose.prod.yml logs -f
# Show status of all production containers
cloud-status:
docker compose -f docker-compose.prod.yml ps
# Pull latest code and rebuild
cloud-update:
git pull
docker compose -f docker-compose.prod.yml up -d --build
# Create a DigitalOcean droplet (requires doctl CLI)
cloud-droplet:
@bash deploy/digitalocean/create-droplet.sh
# Scale agent workers in production: make cloud-scale N=4
cloud-scale:
docker compose -f docker-compose.prod.yml --profile agents up -d --scale agent=$${N:-2}
# Pull a model into Ollama: make cloud-pull-model MODEL=llama3.2
cloud-pull-model:
docker exec timmy-ollama ollama pull $${MODEL:-llama3.2}
# ── Housekeeping ──────────────────────────────────────────────────────────────
clean:
@@ -105,6 +145,8 @@ clean:
help:
@echo ""
@echo " Local Development"
@echo " ─────────────────────────────────────────────────"
@echo " make install create venv + install dev deps"
@echo " make install-bigbrain install with AirLLM (big-model backend)"
@echo " make dev start dashboard at http://localhost:8000"
@@ -116,6 +158,8 @@ help:
@echo " make lint run ruff or flake8"
@echo " make clean remove build artefacts and caches"
@echo ""
@echo " Docker (Dev)"
@echo " ─────────────────────────────────────────────────"
@echo " make docker-build build the timmy-time:latest image"
@echo " make docker-up start dashboard container"
@echo " make docker-agent add one agent worker (AGENT_NAME=Echo)"
@@ -123,3 +167,15 @@ help:
@echo " make docker-logs tail container logs"
@echo " make docker-shell open a bash shell in the dashboard container"
@echo ""
@echo " Cloud Deploy (Production)"
@echo " ─────────────────────────────────────────────────"
@echo " make cloud-deploy one-click server setup (run as root)"
@echo " make cloud-up start production stack"
@echo " make cloud-down stop production stack"
@echo " make cloud-logs tail production logs"
@echo " make cloud-status show container status"
@echo " make cloud-update pull + rebuild from git"
@echo " make cloud-droplet create DigitalOcean droplet (needs doctl)"
@echo " make cloud-scale N=4 scale agent workers"
@echo " make cloud-pull-model MODEL=llama3.2 pull LLM model"
@echo ""

36
deploy/Caddyfile Normal file
View File

@@ -0,0 +1,36 @@
# ── Timmy Time — Caddy Reverse Proxy ─────────────────────────────────────────
#
# Automatic HTTPS via Let's Encrypt.
# Set DOMAIN env var or replace {$DOMAIN} below.
#
# For local/IP-only access (no domain), Caddy serves on :80 without TLS.
#
# NOTE(review): a site address of `localhost` only matches requests with
# Host: localhost, and Caddy serves it over local (self-signed) HTTPS — a
# request aimed at the server's raw IP will NOT match this block. For true
# IP-only HTTP the site address would need to be `:80`. Confirm intent.
{$DOMAIN:localhost} {
	# Reverse proxy to the FastAPI dashboard.
	# NOTE: Caddy 2's reverse_proxy forwards WebSocket upgrades by default,
	# so the @websocket matcher below duplicates this — harmless, but
	# redundant.
	reverse_proxy dashboard:8000

	# WebSocket support (swarm live updates)
	@websocket {
		header Connection *Upgrade*
		header Upgrade websocket
	}
	reverse_proxy @websocket dashboard:8000

	# Security headers
	header {
		X-Content-Type-Options nosniff
		X-Frame-Options SAMEORIGIN
		Referrer-Policy strict-origin-when-cross-origin
		X-XSS-Protection "1; mode=block"
		# Strip the Server header from responses
		-Server
	}

	# Gzip compression
	encode gzip zstd

	# Access logging
	log {
		output stdout
		format console
	}
}

117
deploy/cloud-init.yaml Normal file
View File

@@ -0,0 +1,117 @@
#cloud-config
# ── Timmy Time — Cloud-Init Bootstrap ────────────────────────────────────────
#
# Paste this as "User Data" when creating a DigitalOcean Droplet, AWS EC2
# instance, Hetzner server, Vultr instance, or any cloud VM.
#
# What it does:
#   1. Installs Docker + Docker Compose
#   2. Configures firewall (SSH + HTTP + HTTPS only)
#   3. Clones the Timmy repo to /opt/timmy
#   4. Pulls the default LLM model
#   5. Starts the full production stack
#   6. Enables auto-start on reboot via systemd
#
# After boot (~3-5 min), access: https://<your-ip> or https://<your-domain>
#
# Prerequisites:
#   - Point your domain's A record to this server's IP (for auto-HTTPS)
#   - Or access via IP (Caddy will serve HTTP only)

package_update: true
package_upgrade: true

packages:
  - curl
  - git
  - ufw
  - fail2ban
  - unattended-upgrades

write_files:
  # Timmy environment config — edit after first boot if needed.
  # write_files runs BEFORE runcmd, so /opt/timmy already exists (and is
  # non-empty) by the time the repo is fetched — hence the temp-dir clone
  # in runcmd below.
  - path: /opt/timmy/.env
    permissions: "0600"
    content: |
      # ── Timmy Time — Production Environment ──────────────────────────
      # Edit this file, then:  systemctl restart timmy

      # Your domain (required for auto-HTTPS). Use IP for HTTP-only.
      DOMAIN=localhost

      # LLM model (pulled automatically on first boot)
      OLLAMA_MODEL=llama3.2

      # Generate secrets:
      #   python3 -c "import secrets; print(secrets.token_hex(32))"
      L402_HMAC_SECRET=
      L402_MACAROON_SECRET=

      # Telegram bot token (optional)
      TELEGRAM_TOKEN=

  # Systemd service file
  - path: /etc/systemd/system/timmy.service
    permissions: "0644"
    content: |
      [Unit]
      Description=Timmy Time — Mission Control
      After=docker.service network-online.target
      Requires=docker.service
      Wants=network-online.target

      [Service]
      # oneshot + RemainAfterExit: unit is "active" once `compose up -d`
      # returns. Restart=/RestartSec are deliberately omitted — systemd
      # does not honor Restart= for oneshot units that remain active after
      # exit; container restarts are Docker's job (restart: unless-stopped).
      Type=oneshot
      RemainAfterExit=yes
      WorkingDirectory=/opt/timmy
      EnvironmentFile=-/opt/timmy/.env
      ExecStart=/usr/bin/docker compose -f docker-compose.prod.yml up -d
      ExecStop=/usr/bin/docker compose -f docker-compose.prod.yml down
      ExecReload=/usr/bin/docker compose -f docker-compose.prod.yml restart

      [Install]
      WantedBy=multi-user.target

runcmd:
  # ── Install Docker ─────────────────────────────────────────────────────────
  - curl -fsSL https://get.docker.com | sh
  - systemctl enable docker
  - systemctl start docker
  # ── Firewall ───────────────────────────────────────────────────────────────
  - ufw default deny incoming
  - ufw default allow outgoing
  - ufw allow 22/tcp   # SSH
  - ufw allow 80/tcp   # HTTP
  - ufw allow 443/tcp  # HTTPS
  - ufw allow 443/udp  # HTTP/3
  - ufw --force enable
  # ── Fail2ban ───────────────────────────────────────────────────────────────
  - systemctl enable fail2ban
  - systemctl start fail2ban
  # ── Fetch the repo ─────────────────────────────────────────────────────────
  # /opt/timmy already contains .env (written by write_files above), so a
  # direct `git clone <url> /opt/timmy` would abort with "destination path
  # already exists and is not an empty directory". Clone to a scratch dir
  # and copy everything (including .git) into place instead.
  - git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git /tmp/timmy-src
  - cp -a /tmp/timmy-src/. /opt/timmy/
  - rm -rf /tmp/timmy-src
  - mkdir -p /opt/timmy/data
  # ── Build and start ────────────────────────────────────────────────────────
  - cd /opt/timmy && docker compose -f docker-compose.prod.yml build
  - cd /opt/timmy && docker compose -f docker-compose.prod.yml up -d
  # ── Pull default LLM model ─────────────────────────────────────────────────
  # Ollama's port 11434 is NOT published to the host in
  # docker-compose.prod.yml, and the ollama/ollama image ships no curl, so
  # probe readiness with the bundled CLI inside the container instead of
  # curling localhost from the host.
  - |
    echo "Waiting for Ollama to be ready..."
    for i in $(seq 1 30); do
      if docker exec timmy-ollama ollama list > /dev/null 2>&1; then
        break
      fi
      sleep 5
    done
    docker exec timmy-ollama ollama pull llama3.2
  # ── Enable auto-start on boot ──────────────────────────────────────────────
  - systemctl daemon-reload
  - systemctl enable timmy

View File

@@ -0,0 +1,114 @@
#!/usr/bin/env bash
set -euo pipefail
# ── Timmy Time — DigitalOcean Droplet Creator ────────────────────────────────
#
# Creates a DigitalOcean Droplet with Timmy pre-installed via cloud-init.
#
# Prerequisites:
#   - doctl CLI installed (https://docs.digitalocean.com/reference/doctl/)
#   - doctl auth init (authenticated)
#
# Usage:
#   bash deploy/digitalocean/create-droplet.sh
#   bash deploy/digitalocean/create-droplet.sh --domain timmy.example.com
#   bash deploy/digitalocean/create-droplet.sh --size s-2vcpu-4gb --region nyc1

# ANSI color codes for terminal output
BOLD='\033[1m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
NC='\033[0m'

# Defaults — overridable via the CLI flags parsed below
DROPLET_NAME="timmy-mission-control"
REGION="nyc1"
SIZE="s-2vcpu-4gb" # 2 vCPU, 4GB RAM — good for llama3.2
IMAGE="ubuntu-24-04-x64"
DOMAIN=""

# Parse arguments — each flag consumes its value (hence `shift 2`)
while [[ $# -gt 0 ]]; do
  case $1 in
    --name) DROPLET_NAME="$2"; shift 2 ;;
    --region) REGION="$2"; shift 2 ;;
    --size) SIZE="$2"; shift 2 ;;
    --domain) DOMAIN="$2"; shift 2 ;;
    *) echo "Unknown option: $1"; exit 1 ;;
  esac
done

# Check doctl — hard requirement; everything below shells out to it
if ! command -v doctl &> /dev/null; then
  echo "Error: doctl is not installed."
  echo "Install it: https://docs.digitalocean.com/reference/doctl/how-to/install/"
  exit 1
fi

# Resolve cloud-init.yaml relative to this script so it works from any CWD
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CLOUD_INIT="$SCRIPT_DIR/../cloud-init.yaml"
if [ ! -f "$CLOUD_INIT" ]; then
  echo "Error: cloud-init.yaml not found at $CLOUD_INIT"
  exit 1
fi

echo -e "${CYAN}${BOLD}"
echo " Creating DigitalOcean Droplet"
echo " ─────────────────────────────"
echo -e "${NC}"
echo " Name: $DROPLET_NAME"
echo " Region: $REGION"
echo " Size: $SIZE"
echo " Image: $IMAGE"
echo ""

# Create the droplet; --wait blocks until provisioning starts, --user-data-file
# injects the cloud-init bootstrap that installs and starts Timmy.
DROPLET_ID=$(doctl compute droplet create "$DROPLET_NAME" \
  --region "$REGION" \
  --size "$SIZE" \
  --image "$IMAGE" \
  --user-data-file "$CLOUD_INIT" \
  --enable-monitoring \
  --format ID \
  --no-header \
  --wait)
echo -e "${GREEN}[+]${NC} Droplet created: ID $DROPLET_ID"

# Get the IP (brief pause so the API has the network info populated)
sleep 5
IP=$(doctl compute droplet get "$DROPLET_ID" --format PublicIPv4 --no-header)
echo -e "${GREEN}[+]${NC} Public IP: $IP"

# Set up DNS if domain provided
if [ -n "$DOMAIN" ]; then
  # Extract the base domain (last two parts).
  # NOTE(review): this assumes a two-label registrable domain — for
  # multi-part TLDs (e.g. example.co.uk) it yields "co.uk" and the record
  # would target the wrong zone. Confirm, or create the record manually.
  BASE_DOMAIN=$(echo "$DOMAIN" | awk -F. '{print $(NF-1)"."$NF}')
  SUBDOMAIN=$(echo "$DOMAIN" | sed "s/\.$BASE_DOMAIN$//")
  # If stripping the base changed nothing, the domain IS the base → apex record
  if [ "$SUBDOMAIN" = "$DOMAIN" ]; then
    SUBDOMAIN="@"
  fi
  echo -e "${GREEN}[+]${NC} Creating DNS record: $DOMAIN -> $IP"
  doctl compute domain records create "$BASE_DOMAIN" \
    --record-type A \
    --record-name "$SUBDOMAIN" \
    --record-data "$IP" \
    --record-ttl 300 || echo " (DNS record creation failed — set it manually)"
fi

echo ""
echo -e "${GREEN}${BOLD} Droplet is provisioning!${NC}"
echo ""
echo " The server will be ready in ~3-5 minutes."
echo ""
echo " SSH in: ssh root@$IP"
echo " Check progress: ssh root@$IP tail -f /var/log/cloud-init-output.log"
if [ -n "$DOMAIN" ]; then
  echo " Dashboard: https://$DOMAIN (after DNS propagation)"
fi
echo " Dashboard: http://$IP"
echo ""
echo " After boot, edit /opt/timmy/.env to set your domain and secrets."
echo ""

282
deploy/setup.sh Executable file
View File

@@ -0,0 +1,282 @@
#!/usr/bin/env bash
set -euo pipefail
# ── Timmy Time — One-Click Deploy Script ─────────────────────────────────────
#
# Run this on any fresh Ubuntu/Debian server:
#
#   curl -fsSL https://raw.githubusercontent.com/AlexanderWhitestone/Timmy-time-dashboard/master/deploy/setup.sh | bash
#
# NOTE(review): when piped via `curl | bash`, stdin is the script stream, so
# the interactive `read` prompts in configure_env below will consume script
# text instead of user input. Prefer the clone-first variant for interactive
# setup, or redirect the reads from /dev/tty. Confirm intended usage.
#
# Or clone first and run locally:
#
#   git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git
#   cd Timmy-time-dashboard
#   bash deploy/setup.sh
#
# What it does:
#   1. Installs Docker (if not present)
#   2. Configures firewall
#   3. Generates secrets
#   4. Builds and starts the full stack
#   5. Pulls the LLM model
#   6. Sets up auto-start on boot

# ANSI colors for terminal output
BOLD='\033[1m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
RED='\033[0;31m'
NC='\033[0m'

# Target directory for the installed stack
INSTALL_DIR="/opt/timmy"

# Print the startup banner
banner() {
  echo ""
  echo -e "${CYAN}${BOLD}"
  echo " ╔══════════════════════════════════════════╗"
  echo " ║ Timmy Time — Mission Control ║"
  echo " ║ One-Click Cloud Deploy ║"
  echo " ╚══════════════════════════════════════════╝"
  echo -e "${NC}"
}

# Logging helpers — colored status prefixes
info() { echo -e "${GREEN}[+]${NC} $1"; }
warn() { echo -e "${YELLOW}[!]${NC} $1"; }
error() { echo -e "${RED}[x]${NC} $1"; }
step() { echo -e "\n${BOLD}── $1 ──${NC}"; }

# Abort unless running as root — the script installs packages, writes under
# /opt and /etc, and manages systemd units.
check_root() {
  if [ "$(id -u)" -ne 0 ]; then
    error "This script must be run as root (or with sudo)"
    exit 1
  fi
}

# Emit a 64-hex-char secret; tries python3, then openssl, then raw urandom.
generate_secret() {
  python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null || \
  openssl rand -hex 32 2>/dev/null || \
  head -c 32 /dev/urandom | xxd -p -c 64
}
# Install Docker via the official convenience script if it is not already
# present, and verify the compose v2 plugin is available.
install_docker() {
  step "Installing Docker"
  if command -v docker &> /dev/null; then
    info "Docker already installed: $(docker --version)"
  else
    info "Installing Docker..."
    curl -fsSL https://get.docker.com | sh
    systemctl enable docker
    systemctl start docker
    info "Docker installed: $(docker --version)"
  fi
  # Ensure docker compose plugin is available — the rest of the script
  # depends on `docker compose` (v2 syntax, not the legacy docker-compose).
  if ! docker compose version &> /dev/null; then
    error "Docker Compose plugin not found. Please install it manually."
    exit 1
  fi
  info "Docker Compose: $(docker compose version --short)"
}

# Lock the host down to SSH + HTTP(S) with ufw; best-effort if ufw is absent.
setup_firewall() {
  step "Configuring Firewall"
  if command -v ufw &> /dev/null; then
    ufw default deny incoming
    ufw default allow outgoing
    ufw allow 22/tcp # SSH
    ufw allow 80/tcp # HTTP
    ufw allow 443/tcp # HTTPS
    ufw allow 443/udp # HTTP/3
    ufw --force enable
    info "Firewall configured (SSH, HTTP, HTTPS)"
  else
    warn "ufw not found — install it or configure your firewall manually"
  fi
}

# Enable fail2ban (SSH brute-force protection); install it if missing.
setup_fail2ban() {
  step "Setting up Fail2ban"
  if command -v fail2ban-server &> /dev/null; then
    systemctl enable fail2ban
    systemctl start fail2ban
    info "Fail2ban active"
  else
    # Best-effort install chain; the trailing warn keeps set -e from aborting.
    apt-get install -y fail2ban 2>/dev/null && systemctl enable fail2ban && systemctl start fail2ban && info "Fail2ban installed and active" || \
    warn "Could not install fail2ban — install manually for SSH protection"
  fi
}

# Place the repo at $INSTALL_DIR: update an existing checkout, copy the
# current directory if we are already inside a repo, or clone fresh.
clone_or_update() {
  step "Setting up Timmy"
  if [ -d "$INSTALL_DIR/.git" ]; then
    info "Existing installation found at $INSTALL_DIR — updating..."
    cd "$INSTALL_DIR"
    # Try master then main; never abort the whole install on a pull failure
    git pull origin master || git pull origin main || warn "Could not pull updates"
  elif [ -f "./docker-compose.prod.yml" ]; then
    info "Running from repo directory — copying to $INSTALL_DIR"
    mkdir -p "$INSTALL_DIR"
    # `cp -r .` includes dotfiles (.git, .env.example) since "." is the dir itself
    cp -r . "$INSTALL_DIR/"
    cd "$INSTALL_DIR"
  else
    info "Cloning Timmy Time Dashboard..."
    git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git "$INSTALL_DIR"
    cd "$INSTALL_DIR"
  fi
  # Data directory is bind/volume-mounted by the compose stack
  mkdir -p data
}
# Write $INSTALL_DIR/.env with domain, model choice, and generated secrets.
# Prompts interactively when a terminal is available; otherwise (e.g. the
# advertised `curl ... | bash` install, where stdin is the script stream and
# `read` would swallow script text) it falls back to safe defaults.
# Existing .env files are left untouched.
configure_env() {
  step "Configuring Environment"
  local ENV_FILE="$INSTALL_DIR/.env"
  if [ -f "$ENV_FILE" ]; then
    warn ".env already exists — skipping (edit manually if needed)"
    return
  fi

  # Pick a prompt source: stdin when it's a real terminal, /dev/tty when the
  # script is piped but a controlling terminal exists, else non-interactive.
  local PROMPT_SRC=""
  if [ -t 0 ]; then
    PROMPT_SRC="/dev/stdin"
  elif [ -r /dev/tty ]; then
    PROMPT_SRC="/dev/tty"
  fi

  local DOMAIN="localhost"
  local MODEL="llama3.2"

  if [ -n "$PROMPT_SRC" ]; then
    # Interactive domain setup
    local USER_DOMAIN=""
    echo ""
    read -rp " Enter your domain (or press Enter for IP-only access): " USER_DOMAIN < "$PROMPT_SRC"
    if [ -n "$USER_DOMAIN" ]; then
      DOMAIN="$USER_DOMAIN"
    fi

    # Interactive model selection
    echo ""
    echo " Available LLM models:"
    echo " 1) llama3.2 (~2GB, fast, good for most tasks)"
    echo " 2) llama3.1:8b (~4.7GB, better reasoning)"
    echo " 3) mistral (~4.1GB, good all-rounder)"
    echo " 4) phi3 (~2.2GB, compact and fast)"
    echo ""
    local MODEL_CHOICE=""
    read -rp " Select model [1-4, default=1]: " MODEL_CHOICE < "$PROMPT_SRC"
    case "$MODEL_CHOICE" in
      2) MODEL="llama3.1:8b" ;;
      3) MODEL="mistral" ;;
      4) MODEL="phi3" ;;
      *) MODEL="llama3.2" ;;
    esac
  else
    warn "No terminal available — using defaults (DOMAIN=localhost, model=llama3.2)"
    warn "Edit $ENV_FILE afterwards to customize."
  fi

  # Generate secrets (64 hex chars each)
  local HMAC_SECRET
  HMAC_SECRET=$(generate_secret)
  local MACAROON_SECRET
  MACAROON_SECRET=$(generate_secret)

  cat > "$ENV_FILE" <<EOF
# ── Timmy Time — Production Environment ──────────────────────────────────────
# Generated by deploy/setup.sh on $(date -u +%Y-%m-%dT%H:%M:%SZ)
# Domain for auto-HTTPS (set to your domain, or localhost for IP-only)
DOMAIN=$DOMAIN
# LLM model
OLLAMA_MODEL=$MODEL
# L402 Lightning secrets (auto-generated)
L402_HMAC_SECRET=$HMAC_SECRET
L402_MACAROON_SECRET=$MACAROON_SECRET
# Telegram bot token (optional — get from @BotFather)
TELEGRAM_TOKEN=
# Debug mode (set to true to enable /docs endpoint)
DEBUG=false
EOF
  # Secrets inside — owner-only permissions
  chmod 600 "$ENV_FILE"
  info "Environment configured (secrets auto-generated)"
  info "Domain: $DOMAIN"
  info "Model: $MODEL"
}
# Build the images and start the full production stack detached.
# Relies on clone_or_update having placed the repo at $INSTALL_DIR.
build_and_start() {
  step "Building and Starting Timmy"
  cd "$INSTALL_DIR"
  docker compose -f docker-compose.prod.yml build
  docker compose -f docker-compose.prod.yml up -d
  info "Stack is starting..."
}
# Wait for the Ollama container's API to come up (max ~150s), then pull the
# model named by OLLAMA_MODEL in .env (default llama3.2). Non-fatal on
# timeout — prints the manual pull command instead.
pull_model() {
  step "Pulling LLM Model"
  local MODEL
  MODEL=$(grep -oP 'OLLAMA_MODEL=\K.*' "$INSTALL_DIR/.env" 2>/dev/null || echo "llama3.2")
  info "Waiting for Ollama to be ready..."
  # The ollama/ollama image does not ship curl, so probe the API with the
  # bundled CLI instead: `ollama list` talks to localhost:11434 inside the
  # container and fails until the server is accepting requests.
  local retries=0
  while [ $retries -lt 30 ]; do
    if docker exec timmy-ollama ollama list > /dev/null 2>&1; then
      break
    fi
    sleep 5
    retries=$((retries + 1))
  done
  if [ $retries -ge 30 ]; then
    warn "Ollama not ready after 150s — pull model manually:"
    warn " docker exec timmy-ollama ollama pull $MODEL"
    return
  fi
  info "Pulling $MODEL (this may take a few minutes)..."
  docker exec timmy-ollama ollama pull "$MODEL"
  info "Model $MODEL ready"
}
# Install the systemd unit so the stack comes back up after a reboot.
# NOTE(review): deploy/timmy.service declares Type=oneshot with
# RemainAfterExit=yes AND Restart=on-failure — systemd rejects or ignores
# Restart= in that combination; verify the unit loads on the target OS.
setup_systemd() {
  step "Enabling Auto-Start on Boot"
  cp "$INSTALL_DIR/deploy/timmy.service" /etc/systemd/system/timmy.service
  systemctl daemon-reload
  systemctl enable timmy
  info "Timmy will auto-start on reboot"
}

# Print access URLs and an operator cheat-sheet. Uses the public IP from
# ifconfig.me, falling back to the first local address, then a placeholder.
print_summary() {
  local DOMAIN
  DOMAIN=$(grep -oP 'DOMAIN=\K.*' "$INSTALL_DIR/.env" 2>/dev/null || echo "localhost")
  local IP
  IP=$(curl -4sf https://ifconfig.me 2>/dev/null || hostname -I 2>/dev/null | awk '{print $1}' || echo "your-server-ip")
  echo ""
  echo -e "${GREEN}${BOLD}"
  echo " ╔══════════════════════════════════════════╗"
  echo " ║ Timmy is LIVE! ║"
  echo " ╚══════════════════════════════════════════╝"
  echo -e "${NC}"
  echo ""
  if [ "$DOMAIN" != "localhost" ]; then
    echo -e " ${BOLD}Dashboard:${NC} https://$DOMAIN"
  fi
  echo -e " ${BOLD}Dashboard:${NC} http://$IP"
  echo ""
  echo -e " ${BOLD}Useful commands:${NC}"
  echo " systemctl status timmy # check status"
  echo " systemctl restart timmy # restart stack"
  echo " docker compose -f /opt/timmy/docker-compose.prod.yml logs -f # tail logs"
  echo " nano /opt/timmy/.env # edit config"
  echo ""
  echo -e " ${BOLD}Scale agents:${NC}"
  echo " cd /opt/timmy"
  echo " docker compose -f docker-compose.prod.yml --profile agents up -d --scale agent=4"
  echo ""
  echo -e " ${BOLD}Update Timmy:${NC}"
  echo " cd /opt/timmy && git pull && docker compose -f docker-compose.prod.yml up -d --build"
  echo ""
}

# ── Main ─────────────────────────────────────────────────────────────────────
# Ordered pipeline; set -euo pipefail aborts on the first hard failure.
banner
check_root
install_docker
setup_firewall
setup_fail2ban
clone_or_update
configure_env
build_and_start
pull_model
setup_systemd
print_summary

29
deploy/timmy.service Normal file
View File

@@ -0,0 +1,29 @@
[Unit]
Description=Timmy Time — Mission Control
Documentation=https://github.com/AlexanderWhitestone/Timmy-time-dashboard
After=docker.service network-online.target
Requires=docker.service
Wants=network-online.target

[Service]
# oneshot + RemainAfterExit: the unit is "active" once `compose up -d`
# returns; the long-running processes are the containers themselves.
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/timmy
EnvironmentFile=-/opt/timmy/.env
# Start / stop / reload the full production stack
ExecStart=/usr/bin/docker compose -f docker-compose.prod.yml up -d
ExecStop=/usr/bin/docker compose -f docker-compose.prod.yml down
ExecReload=/usr/bin/docker compose -f docker-compose.prod.yml restart
# Restart=/RestartSec intentionally omitted: systemd rejects or ignores
# Restart= for Type=oneshot units with RemainAfterExit=yes, and container
# restarts are already handled by Docker (restart: unless-stopped in
# docker-compose.prod.yml).
# Security hardening
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): /var/run is a symlink to /run on modern distros — if the
# sandbox blocks the docker CLI, switch to /run/docker.sock. Confirm on
# the target OS.
ReadWritePaths=/opt/timmy /var/run/docker.sock

[Install]
WantedBy=multi-user.target

152
docker-compose.prod.yml Normal file
View File

@@ -0,0 +1,152 @@
# ── Timmy Time — Production Stack ────────────────────────────────────────────
#
# One-click cloud deployment. Includes:
#   - Caddy       auto-HTTPS reverse proxy (Let's Encrypt)
#   - Dashboard   FastAPI app + swarm coordinator
#   - Timmy       sovereign AI agent
#   - Ollama      local LLM inference engine
#   - Watchtower  auto-updates containers when images change
#
# Usage:
#   cp .env.example .env   # edit with your domain + secrets
#   docker compose -f docker-compose.prod.yml up -d
#
# Scale agents:
#   docker compose -f docker-compose.prod.yml --profile agents up -d --scale agent=4

services:
  # ── Caddy — automatic HTTPS reverse proxy ──────────────────────────────────
  caddy:
    image: caddy:2-alpine
    container_name: timmy-caddy
    ports:
      - "80:80"
      - "443:443"
      - "443:443/udp" # HTTP/3
    volumes:
      - ./deploy/Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy-data:/data
      - caddy-config:/config
    environment:
      DOMAIN: "${DOMAIN:-localhost}"
    networks:
      - swarm-net
    restart: unless-stopped

  # ── Ollama — local LLM inference ───────────────────────────────────────────
  # Not exposed on the host; other services reach it at http://ollama:11434.
  ollama:
    image: ollama/ollama:latest
    container_name: timmy-ollama
    volumes:
      - ollama-models:/root/.ollama
    networks:
      - swarm-net
    restart: unless-stopped
    # GPU passthrough (uncomment for NVIDIA GPU)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    healthcheck:
      # The ollama/ollama image ships no curl — a curl-based check would
      # always fail, leaving dependents (dashboard → timmy) blocked on
      # service_healthy forever. `ollama list` queries the local API on
      # 11434, so it works as a readiness probe with the bundled CLI.
      test: ["CMD", "ollama", "list"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s

  # ── Dashboard (coordinator + FastAPI) ──────────────────────────────────────
  dashboard:
    build: .
    image: timmy-time:latest
    container_name: timmy-dashboard
    volumes:
      - timmy-data:/app/data
    environment:
      DEBUG: "${DEBUG:-false}"
      OLLAMA_URL: "http://ollama:11434"
      OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
      L402_HMAC_SECRET: "${L402_HMAC_SECRET:-}"
      L402_MACAROON_SECRET: "${L402_MACAROON_SECRET:-}"
      TELEGRAM_TOKEN: "${TELEGRAM_TOKEN:-}"
    networks:
      - swarm-net
    depends_on:
      ollama:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      # NOTE(review): requires curl inside the timmy-time image — confirm
      # the Dockerfile's apt-get layer installs it, otherwise this service
      # never reports healthy and timmy/agent never start.
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 15s

  # ── Timmy — sovereign AI agent ─────────────────────────────────────────────
  timmy:
    build: .
    image: timmy-time:latest
    container_name: timmy-agent
    volumes:
      - timmy-data:/app/data
    environment:
      COORDINATOR_URL: "http://dashboard:8000"
      OLLAMA_URL: "http://ollama:11434"
      OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
      TIMMY_AGENT_ID: "timmy"
    command: ["python", "-m", "timmy.docker_agent"]
    networks:
      - swarm-net
    depends_on:
      dashboard:
        condition: service_healthy
    restart: unless-stopped

  # ── Agent worker template ──────────────────────────────────────────────────
  # Enabled via `--profile agents`; scale with `--scale agent=N`.
  agent:
    build: .
    image: timmy-time:latest
    profiles:
      - agents
    volumes:
      - timmy-data:/app/data
    environment:
      COORDINATOR_URL: "http://dashboard:8000"
      OLLAMA_URL: "http://ollama:11434"
      OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
      AGENT_NAME: "${AGENT_NAME:-Worker}"
      AGENT_CAPABILITIES: "${AGENT_CAPABILITIES:-general}"
    # `$$` escapes compose's ${}-interpolation so that both $(hostname) and
    # ${AGENT_NAME:-Worker} are evaluated by the container's shell at runtime
    # (an unescaped `$(` trips compose's interpolation parser).
    command: ["sh", "-c", "python -m swarm.agent_runner --agent-id agent-$$(hostname) --name $${AGENT_NAME:-Worker}"]
    networks:
      - swarm-net
    depends_on:
      dashboard:
        condition: service_healthy
    restart: unless-stopped

  # ── Watchtower — auto-update containers ────────────────────────────────────
  watchtower:
    image: containrrr/watchtower
    container_name: timmy-watchtower
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      WATCHTOWER_CLEANUP: "true"
      WATCHTOWER_POLL_INTERVAL: "3600" # check every hour
      WATCHTOWER_LABEL_ENABLE: "false"
    restart: unless-stopped

# ── Volumes ──────────────────────────────────────────────────────────────────
volumes:
  timmy-data:
  caddy-data:
  caddy-config:
  ollama-models:

# ── Network ──────────────────────────────────────────────────────────────────
networks:
  swarm-net:
    driver: bridge