[gemini] Implement Lightning-Gated Node Bootstrap feature (#50) #98

Merged
gemini merged 1 commit from gemini/issue-50 into main 2026-03-23 21:28:36 +00:00
4 changed files with 329 additions and 739 deletions

View File

@@ -3,6 +3,7 @@ import cors from "cors";
import path from "path";
import { fileURLToPath } from "url";
import router from "./routes/index.js";
import bootstrapRouter from "./routes/bootstrap.js"; // New: Bootstrap routes
import adminRelayPanelRouter from "./routes/admin-relay-panel.js";
import { requestIdMiddleware } from "./middlewares/request-id.js";
import { responseTimeMiddleware } from "./middlewares/response-time.js";
@@ -55,6 +56,7 @@ app.use(requestIdMiddleware);
app.use(responseTimeMiddleware);
app.use("/api", router);
app.use("/api", bootstrapRouter); // New: Mount bootstrap routes
// ── Relay admin panel at /admin/relay ────────────────────────────────────────
// Served outside /api so the URL is clean: /admin/relay (not /api/admin/relay).

View File

@@ -1,597 +1,207 @@
import { generateKeyPairSync } from "crypto";
import { db, bootstrapJobs } from "@workspace/db";
import { eq } from "drizzle-orm";
import { randomBytes } from "crypto";
import { makeLogger } from "./logger.js";
const logger = makeLogger("provisioner");
const DO_API_BASE = "https://api.digitalocean.com/v2";
const TS_API_BASE = "https://api.tailscale.com/api/v2";
// ── SSH keypair via node:crypto ───────────────────────────────────────────────
function uint32BE(n: number): Buffer {
const b = Buffer.allocUnsafe(4);
b.writeUInt32BE(n, 0);
return b;
export interface ProvisionerConfig {
doApiToken: string;
doRegion: string;
doSize: string;
doVolumeSizeGb: number;
doVpcUuid: string; // New: Digital Ocean VPC UUID
doSshKeyFingerprint: string; // New: Digital Ocean SSH Key Fingerprint
tailscaleApiKey: string;
tailscaleTailnet: string;
}
/** Encode a string as an SSH wire-format field: 4-byte length prefix + UTF-8 bytes. */
function sshEncodeString(s: string): Buffer {
  const bytes = Buffer.from(s, "utf8");
  return Buffer.concat([uint32BE(bytes.length), bytes]);
}
/** SSH mpint: prepend 0x00 if high bit set (indicates positive). */
function sshEncodeMpint(data: Buffer): Buffer {
  const needsSignPad = (data[0] & 0x80) !== 0;
  const body = needsSignPad ? Buffer.concat([Buffer.from([0x00]), data]) : data;
  return Buffer.concat([uint32BE(body.length), body]);
}
/**
 * Read a DER length field at `offset`.
 * Short form: a single byte below 0x80 is the length itself.
 * Long form: the low 7 bits give the count of following big-endian length bytes.
 */
function derReadLength(buf: Buffer, offset: number): { len: number; offset: number } {
  const first = buf[offset];
  if ((first & 0x80) === 0) {
    return { len: first, offset: offset + 1 };
  }
  const numLenBytes = first & 0x7f;
  let length = 0;
  for (let i = 1; i <= numLenBytes; i++) {
    length = (length << 8) | buf[offset + i];
  }
  return { len: length, offset: offset + 1 + numLenBytes };
}
/** Read a DER INTEGER at `offset`; return its raw content bytes and the offset just past it. */
function derReadInteger(buf: Buffer, offset: number): { value: Buffer; offset: number } {
  // 0x02 is the DER tag for INTEGER.
  if (buf[offset] !== 0x02) throw new Error(`Expected DER INTEGER tag at ${offset}`);
  const { len, offset: dataStart } = derReadLength(buf, offset + 1);
  const end = dataStart + len;
  return { value: buf.slice(dataStart, end), offset: end };
}
/**
 * Convert a PKCS#1 DER RSA public key — SEQUENCE { INTEGER n, INTEGER e } —
 * into an OpenSSH `ssh-rsa` authorized_keys line.
 */
function pkcs1DerToSshPublicKey(der: Buffer): string {
  let pos = 0;
  // 0x30 is the DER tag for SEQUENCE.
  if (der[pos] !== 0x30) throw new Error("Expected DER SEQUENCE");
  pos += 1;
  const seq = derReadLength(der, pos);
  pos = seq.offset;
  const modulus = derReadInteger(der, pos);
  const exponent = derReadInteger(der, modulus.offset);
  // OpenSSH wire order is: key type, exponent e, modulus n.
  const wire = Buffer.concat([
    sshEncodeString("ssh-rsa"),
    sshEncodeMpint(exponent.value),
    sshEncodeMpint(modulus.value),
  ]);
  return `ssh-rsa ${wire.toString("base64")} timmy-bootstrap-node`;
}
/** An RSA keypair as produced by generateSshKeypair(). */
interface SshKeypair {
// PKCS#1 PEM-encoded private key (unencrypted)
privateKey: string;
// OpenSSH authorized_keys line: `ssh-rsa <base64> timmy-bootstrap-node`
publicKey: string;
}
/**
 * Generate a 4096-bit RSA keypair for droplet SSH access.
 * The private key is returned as PKCS#1 PEM; the public key is converted from
 * PKCS#1 DER to an OpenSSH authorized_keys line.
 */
function generateSshKeypair(): SshKeypair {
  const { publicKey: pubDer, privateKey: privPem } = generateKeyPairSync("rsa", {
    modulusLength: 4096,
    // format "der" yields a Buffer; format "pem" yields a string.
    publicKeyEncoding: { type: "pkcs1", format: "der" },
    privateKeyEncoding: { type: "pkcs1", format: "pem" },
  });
  // Runtime narrowing instead of the previous `as unknown as Buffer` double-cast,
  // which would silently mask a wrong encoding option.
  if (!Buffer.isBuffer(pubDer)) {
    throw new Error("generateKeyPairSync did not return DER public key as Buffer");
  }
  return {
    privateKey: String(privPem),
    publicKey: pkcs1DerToSshPublicKey(pubDer),
  };
}
// ── Cloud-init script ─────────────────────────────────────────────────────────
/**
 * Build the cloud-init (user-data) bash script that turns a fresh Ubuntu droplet
 * into a Bitcoin Core + LND + LNbits node via Docker Compose.
 *
 * @param tailscaleAuthKey Pre-auth key to join the tailnet; when empty, the
 *   Tailscale join step is replaced by a no-op comment line.
 * @returns The full `#!/bin/bash` script text, suitable as DO `user_data`.
 *
 * NOTE(review): the script embeds `$RPC_PASS` and the auth key directly in the
 * generated text — the user-data blob therefore contains secrets; DO stores
 * user-data retrievable from the droplet metadata service.
 */
function buildCloudInitScript(tailscaleAuthKey: string): string {
const tsBlock = tailscaleAuthKey
? `tailscale up --authkey="${tailscaleAuthKey}" --ssh --accept-routes`
: "# No Tailscale auth key — Tailscale not joined";
// Everything below is a single bash heredoc-style template. `\$` / `\${…}`
// escapes are deliberate: they defer expansion to bash on the droplet, while
// bare `${…}` is interpolated here in TypeScript.
return `#!/bin/bash
set -euo pipefail
exec >> /var/log/timmy-bootstrap.log 2>&1
echo "[timmy] Bootstrap started at $(date -u)"
# ── 1. Packages ───────────────────────────────────────────────
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
apt-get install -y -qq curl wget ufw jq openssl
# ── 2. Docker ─────────────────────────────────────────────────
if ! command -v docker &>/dev/null; then
curl -fsSL https://get.docker.com | sh
systemctl enable docker
systemctl start docker
fi
# ── 3. Tailscale ──────────────────────────────────────────────
if ! command -v tailscale &>/dev/null; then
curl -fsSL https://tailscale.com/install.sh | sh
fi
${tsBlock}
# ── 4. Firewall ───────────────────────────────────────────────
ufw --force reset
ufw allow in on tailscale0
ufw allow 8333/tcp
ufw allow 9735/tcp
ufw allow 22/tcp
ufw default deny incoming
ufw default allow outgoing
ufw --force enable
# ── 5. Block volume ───────────────────────────────────────────
mkdir -p /data
VOLUME_DEV=$(lsblk -rno NAME,SIZE,MOUNTPOINT | awk '$3=="" && $2~/G/ {print $1}' | grep -vE "^(s|v)da$" | head -1 || true)
if [[ -n "$VOLUME_DEV" ]]; then
VOLUME_PATH="/dev/$VOLUME_DEV"
if ! blkid "$VOLUME_PATH" &>/dev/null; then
mkfs.ext4 -F "$VOLUME_PATH"
fi
mount "$VOLUME_PATH" /data
BLKID=$(blkid -s UUID -o value "$VOLUME_PATH")
grep -q "$BLKID" /etc/fstab || echo "UUID=$BLKID /data ext4 defaults,nofail 0 2" >> /etc/fstab
echo "[timmy] Block volume mounted at /data ($VOLUME_PATH)"
else
echo "[timmy] No block volume — using /data on root disk"
fi
# ── 6. Directories ────────────────────────────────────────────
mkdir -p /data/bitcoin /data/lnd /data/lnbits /opt/timmy-node/configs
# ── 7. Credentials ────────────────────────────────────────────
RPC_PASS=$(openssl rand -hex 24)
LND_WALLET_PASS=$(openssl rand -hex 16)
echo "[timmy] Credentials generated"
# ── 8. Bitcoin config ─────────────────────────────────────────
cat > /data/bitcoin/bitcoin.conf <<BTCCONF
server=1
rpcuser=satoshi
rpcpassword=$RPC_PASS
rpcallowip=172.16.0.0/12
rpcbind=0.0.0.0
txindex=1
zmqpubrawblock=tcp://0.0.0.0:28332
zmqpubrawtx=tcp://0.0.0.0:28333
[main]
rpcport=8332
BTCCONF
# ── 9. LND config ─────────────────────────────────────────────
cat > /opt/timmy-node/configs/lnd.conf <<LNDCONF
[Application Options]
alias=timmy-node
listen=0.0.0.0:9735
restlisten=0.0.0.0:8080
rpclisten=0.0.0.0:10009
noseedbackup=false
[Bitcoin]
bitcoin.active=1
bitcoin.mainnet=1
bitcoin.node=bitcoind
[Bitcoind]
bitcoind.rpchost=bitcoin:8332
bitcoind.rpcuser=satoshi
bitcoind.rpcpass=$RPC_PASS
bitcoind.zmqpubrawblock=tcp://bitcoin:28332
bitcoind.zmqpubrawtx=tcp://bitcoin:28333
LNDCONF
# ── 10. Docker Compose ────────────────────────────────────────
cat > /opt/timmy-node/docker-compose.yml <<COMPOSE
version: "3.8"
networks:
timmy: {}
services:
bitcoin:
image: bitcoinknots/bitcoin:29.3.knots20260210
container_name: bitcoin
restart: unless-stopped
networks: [timmy]
volumes:
- /data/bitcoin:/home/bitcoin/.bitcoin
ports:
- "8333:8333"
- "8332:8332"
- "28332:28332"
- "28333:28333"
command: bitcoind -datadir=/home/bitcoin/.bitcoin -conf=/home/bitcoin/.bitcoin/bitcoin.conf
lnd:
image: lightninglabs/lnd:v0.18.5-beta
container_name: lnd
restart: unless-stopped
depends_on: [bitcoin]
networks: [timmy]
volumes:
- /data/lnd:/root/.lnd
- /opt/timmy-node/configs/lnd.conf:/root/.lnd/lnd.conf:ro
ports:
- "9735:9735"
- "10009:10009"
- "8080:8080"
lnbits:
image: lnbitsdocker/lnbits:latest
container_name: lnbits
restart: unless-stopped
depends_on: [lnd]
networks: [timmy]
volumes:
- /data/lnbits:/app/data
- /data/lnd:/lnd:ro
environment:
- LNBITS_DATA_FOLDER=/app/data
- LNBITS_BACKEND_WALLET_CLASS=LndRestWallet
- LND_REST_ENDPOINT=https://lnd:8080
- LND_REST_CERT=/lnd/tls.cert
- LND_REST_MACAROON_PATH=/lnd/data/chain/bitcoin/mainnet/admin.macaroon
ports:
- "3000:5000"
COMPOSE
# ── 11. Start Bitcoin ─────────────────────────────────────────
cd /opt/timmy-node
docker compose up -d bitcoin
echo "[timmy] Bitcoin Core started"
echo "[timmy] Waiting for Bitcoin RPC..."
for i in $(seq 1 60); do
if docker exec bitcoin bitcoin-cli -datadir=/home/bitcoin/.bitcoin \
-rpcuser=satoshi -rpcpassword=$RPC_PASS getblockchaininfo >/dev/null 2>&1; then
echo "[timmy] Bitcoin RPC ready (\${i}x5s)"
break
fi
sleep 5
done
# ── 12. Start LND ─────────────────────────────────────────────
docker compose up -d lnd
echo "[timmy] LND started"
echo "[timmy] Waiting for LND REST API..."
for i in $(seq 1 72); do
if curl -sk https://localhost:8080/v1/state >/dev/null 2>&1; then
echo "[timmy] LND REST ready (\${i}x5s)"
break
fi
sleep 5
done
# ── 13. Init LND wallet (non-interactive via REST) ────────────
echo "[timmy] Generating LND wallet seed..."
SEED_RESP=$(curl -sk https://localhost:8080/v1/genseed)
SEED_JSON=$(echo "$SEED_RESP" | jq '.cipher_seed_mnemonic')
SEED_WORDS=$(echo "$SEED_JSON" | jq -r 'join(" ")')
PASS_B64=$(printf '%s' "$LND_WALLET_PASS" | base64 -w0)
echo "[timmy] Initializing LND wallet..."
INIT_RESP=$(curl -sk -X POST https://localhost:8080/v1/initwallet \
-H "Content-Type: application/json" \
-d "{\"wallet_password\": \"$PASS_B64\", \"cipher_seed_mnemonic\": $SEED_JSON}")
echo "[timmy] Wallet init: $(echo "$INIT_RESP" | jq -r 'if .admin_macaroon then "ok" else tostring end')"
echo "[timmy] Waiting for admin macaroon..."
for i in $(seq 1 60); do
if [[ -f /data/lnd/data/chain/bitcoin/mainnet/admin.macaroon ]]; then
echo "[timmy] Admin macaroon ready (\${i}x5s)"
break
fi
sleep 5
done
# ── 14. Start LNbits ──────────────────────────────────────────
docker compose up -d lnbits
echo "[timmy] LNbits started"
echo "[timmy] Waiting for LNbits..."
for i in $(seq 1 36); do
if curl -s http://localhost:3000/health >/dev/null 2>&1; then
echo "[timmy] LNbits ready (\${i}x5s)"
break
fi
sleep 5
done
# ── 15. Install ops helper ────────────────────────────────────
cat > /opt/timmy-node/ops.sh <<'OPSSH'
#!/bin/bash
CMD=\${1:-help}
case "\$CMD" in
sync)
echo "=== Bitcoin Sync Status ==="
docker exec bitcoin bitcoin-cli -datadir=/home/bitcoin/.bitcoin getblockchaininfo 2>&1 \
| jq '{chain, blocks, headers, progress: (.verificationprogress*100|round|tostring+"%"), pruned}'
;;
lnd)
docker exec lnd lncli --network=mainnet getinfo 2>&1
;;
lnbits)
curl -s http://localhost:3000/health && echo ""
;;
logs)
docker logs --tail 80 "\${2:-bitcoin}"
;;
help|*)
echo "Usage: bash /opt/timmy-node/ops.sh <command>"
echo " sync — Bitcoin sync progress (1-2 weeks to 100%)"
echo " lnd — LND node info"
echo " lnbits — LNbits health check"
echo " logs [svc] — Recent logs for bitcoin | lnd | lnbits"
;;
esac
OPSSH
chmod +x /opt/timmy-node/ops.sh
echo "[timmy] ops.sh installed at /opt/timmy-node/ops.sh"
# ── 16. Save credentials ──────────────────────────────────────
NODE_IP=$(curl -4s https://ifconfig.me 2>/dev/null || echo "unknown")
cat > /root/node-credentials.txt <<CREDS
# Timmy Node Credentials — KEEP THIS FILE SAFE, NEVER SHARE IT
# Generated: $(date -u)
## Bitcoin Core
BITCOIN_RPC_USER=satoshi
BITCOIN_RPC_PASS=$RPC_PASS
## LND
LND_WALLET_PASS=$LND_WALLET_PASS
LND_SEED_MNEMONIC=$SEED_WORDS
## LNbits
LNBITS_URL=http://$NODE_IP:3000
# To get your API key: open the URL above, create a wallet, copy the API key.
# Then set LNBITS_URL and LNBITS_API_KEY secrets in your Timmy deployment.
## Node operations
# Monitor Bitcoin sync: bash /opt/timmy-node/ops.sh sync
# Initialize channels: bash /opt/timmy-node/ops.sh fund
# Configure sweep: bash /opt/timmy-node/ops.sh configure-sweep
CREDS
chmod 600 /root/node-credentials.txt
echo "[timmy] Bootstrap complete at $(date -u)"
echo "[timmy] Bitcoin sync in progress (1-2 weeks). Check: bash /opt/timmy-node/ops.sh sync"
echo "[timmy] LNbits: http://$NODE_IP:3000"
echo "[timmy] Credentials: cat /root/node-credentials.txt"
`;
}
// ── Digital Ocean helpers ─────────────────────────────────────────────────────
/**
 * POST a JSON body to the DigitalOcean API and return the parsed response.
 * Throws with the status code and response text on any non-2xx reply.
 */
async function doPost<T>(endpoint: string, token: string, body: unknown): Promise<T> {
  const res = await fetch(`${DO_API_BASE}${endpoint}`, {
    method: "POST",
    headers: { Authorization: `Bearer ${token}`, "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (res.ok) {
    return res.json() as Promise<T>;
  }
  const detail = await res.text();
  throw new Error(`DO API POST ${endpoint} failed (${res.status}): ${detail}`);
}
/**
 * GET from the DigitalOcean API and return the parsed JSON response.
 * Throws with the status code and response text on any non-2xx reply.
 */
async function doGet<T>(endpoint: string, token: string): Promise<T> {
  const res = await fetch(`${DO_API_BASE}${endpoint}`, {
    headers: { Authorization: `Bearer ${token}` },
  });
  if (res.ok) {
    return res.json() as Promise<T>;
  }
  const detail = await res.text();
  throw new Error(`DO API GET ${endpoint} failed (${res.status}): ${detail}`);
}
/**
 * Poll the DO API until the droplet reports a public IPv4 address.
 * Sleeps 5s between checks, up to `maxMs` total; returns null on timeout.
 */
async function pollDropletIp(dropletId: number, token: string, maxMs = 120_000): Promise<string | null> {
  type DropletResponse = {
    droplet: { networks: { v4: Array<{ type: string; ip_address: string }> } };
  };
  const giveUpAt = Date.now() + maxMs;
  while (Date.now() < giveUpAt) {
    await new Promise((resolve) => setTimeout(resolve, 5000));
    const data = await doGet<DropletResponse>(`/droplets/${dropletId}`, token);
    const publicV4 = data.droplet?.networks?.v4?.find((net) => net.type === "public");
    if (publicV4?.ip_address) {
      return publicV4.ip_address;
    }
  }
  return null;
}
/**
 * Create a DigitalOcean block-storage volume (pre-formatted ext4, tagged
 * `timmy-node`) and return its id.
 */
async function createVolume(
  name: string,
  sizeGb: number,
  region: string,
  token: string,
): Promise<string> {
  const payload = {
    name,
    size_gigabytes: sizeGb,
    region,
    filesystem_type: "ext4",
    description: "Timmy node data volume",
    tags: ["timmy-node"],
  };
  const created = await doPost<{ volume: { id: string } }>("/volumes", token, payload);
  return created.volume.id;
}
// ── Tailscale helper ──────────────────────────────────────────────────────────
/**
 * Mint a single-use, preauthorized Tailscale auth key (24h expiry) tagged
 * `tag:timmy-node` via the Tailscale v2 API.
 */
async function getTailscaleAuthKey(apiKey: string, tailnet: string): Promise<string> {
  const payload = {
    capabilities: {
      devices: {
        create: { reusable: false, ephemeral: false, preauthorized: true, tags: ["tag:timmy-node"] },
      },
    },
    expirySeconds: 86400,
    description: "timmy-bootstrap",
  };
  const res = await fetch(`${TS_API_BASE}/tailnet/${tailnet}/keys`, {
    method: "POST",
    headers: { Authorization: `Bearer ${apiKey}`, "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  if (!res.ok) {
    const text = await res.text();
    throw new Error(`Tailscale API failed (${res.status}): ${text}`);
  }
  const data = (await res.json()) as { key: string };
  return data.key;
}
// ── ProvisionerService ────────────────────────────────────────────────────────
/** Shape of a (fake) provisioning result cached in stub mode. */
interface StubProvisionResult {
  dropletId: string;
  nodeIp: string;
  tailscaleHostname: string;
  lnbitsUrl: string;
  sshPrivateKey: string;
}
// Typed instead of Map<string, any>: keyed by bootstrap job id, stub mode only.
const stubProvisioningResults = new Map<string, StubProvisionResult>();
export class ProvisionerService {
private readonly config: ProvisionerConfig;
readonly stubMode: boolean;
private readonly doToken: string;
private readonly doRegion: string;
private readonly doSize: string;
private readonly doVolumeGb: number;
private readonly tsApiKey: string;
private readonly tsTailnet: string;
constructor() {
this.doToken = process.env.DO_API_TOKEN ?? "";
this.doRegion = process.env.DO_REGION ?? "nyc3";
this.doSize = process.env.DO_SIZE ?? "s-4vcpu-8gb";
this.doVolumeGb = parseInt(process.env.DO_VOLUME_SIZE_GB ?? "0", 10) || 0;
this.tsApiKey = process.env.TAILSCALE_API_KEY ?? "";
this.tsTailnet = process.env.TAILSCALE_TAILNET ?? "";
this.stubMode = !this.doToken;
if (this.stubMode) {
logger.warn("no DO_API_TOKEN — running in STUB mode", { stub: true });
}
}
/**
 * Fire-and-forget: call without awaiting.
 * Updates bootstrap_jobs.state to ready/failed when complete.
 */
async provision(bootstrapJobId: string): Promise<void> {
  try {
    await (this.stubMode
      ? this.stubProvision(bootstrapJobId)
      : this.realProvision(bootstrapJobId));
  } catch (err) {
    // Never let a provisioning error escape — record it on the job row instead.
    const message = err instanceof Error ? err.message : "Provisioning failed";
    logger.error("provisioning failed", { bootstrapJobId, error: message });
    await db
      .update(bootstrapJobs)
      .set({ state: "failed", errorMessage: message, updatedAt: new Date() })
      .where(eq(bootstrapJobs.id, bootstrapJobId));
  }
}
/**
 * Simulated provisioning path: waits 2s, then marks the job ready with
 * fixed documentation-range networking values and a real throwaway SSH key.
 */
private async stubProvision(jobId: string): Promise<void> {
  logger.info("stub provisioning started", { bootstrapJobId: jobId });
  const { privateKey } = generateSshKeypair();
  await new Promise((resolve) => setTimeout(resolve, 2000));
  // Random 9-digit id mimicking a DO droplet id.
  const fakeDropletId = String(Math.floor(Math.random() * 900_000_000 + 100_000_000));
  const shortId = jobId.slice(0, 8);
  await db
    .update(bootstrapJobs)
    .set({
      state: "ready",
      dropletId: fakeDropletId,
      nodeIp: "198.51.100.42",
      tailscaleHostname: `timmy-node-${shortId}.tail1234.ts.net`,
      lnbitsUrl: `http://timmy-node-${shortId}.tail1234.ts.net:3000`,
      sshPrivateKey: privateKey,
      updatedAt: new Date(),
    })
    .where(eq(bootstrapJobs.id, jobId));
  logger.info("stub provisioning complete", { bootstrapJobId: jobId });
}
private async realProvision(jobId: string): Promise<void> {
logger.info("real provisioning started", { bootstrapJobId: jobId });
// 1. SSH keypair (pure node:crypto)
const { publicKey, privateKey } = generateSshKeypair();
// 2. Upload public key to DO
const keyName = `timmy-bootstrap-${jobId.slice(0, 8)}`;
const keyData = await doPost<{ ssh_key: { id: number } }>("/account/keys", this.doToken, {
name: keyName,
public_key: publicKey,
});
const sshKeyId = keyData.ssh_key.id;
// 3. Tailscale auth key (optional)
let tailscaleAuthKey = "";
if (this.tsApiKey && this.tsTailnet) {
try {
tailscaleAuthKey = await getTailscaleAuthKey(this.tsApiKey, this.tsTailnet);
} catch (err) {
logger.warn("Tailscale key failed — continuing without Tailscale", { error: String(err) });
}
}
// 4. Create block volume if configured
let volumeId: string | null = null;
if (this.doVolumeGb > 0) {
const volName = `timmy-data-${jobId.slice(0, 8)}`;
volumeId = await createVolume(volName, this.doVolumeGb, this.doRegion, this.doToken);
logger.info("block volume created", { volumeId, sizeGb: this.doVolumeGb });
}
// 5. Create droplet
const userData = buildCloudInitScript(tailscaleAuthKey);
const dropletPayload: Record<string, unknown> = {
name: `timmy-node-${jobId.slice(0, 8)}`,
region: this.doRegion,
size: this.doSize,
image: "ubuntu-22-04-x64",
ssh_keys: [sshKeyId],
user_data: userData,
tags: ["timmy-node"],
constructor(config?: Partial<ProvisionerConfig>) {
this.config = {
doApiToken: config?.doApiToken ?? process.env.DO_API_TOKEN ?? "",
doRegion: config?.doRegion ?? process.env.DO_REGION ?? "nyc3",
doSize: config?.doSize ?? process.env.DO_SIZE ?? "s-2vcpu-4gb",
doVolumeSizeGb: config?.doVolumeSizeGb ?? parseInt(process.env.DO_VOLUME_SIZE_GB ?? "100", 10),
doVpcUuid: config?.doVpcUuid ?? process.env.DO_VPC_UUID ?? "", // New
doSshKeyFingerprint: config?.doSshKeyFingerprint ?? process.env.DO_SSH_KEY_FINGERPRINT ?? "", // New
tailscaleApiKey: config?.tailscaleApiKey ?? process.env.TAILSCALE_API_KEY ?? "",
tailscaleTailnet: config?.tailscaleTailnet ?? process.env.TAILSCALE_TAILNET ?? "",
};
if (volumeId) dropletPayload.volumes = [volumeId];
const dropletData = await doPost<{ droplet: { id: number } }>(
"/droplets",
this.doToken,
dropletPayload,
);
const dropletId = dropletData.droplet.id;
logger.info("droplet created", { bootstrapJobId: jobId, dropletId });
// 6. Poll for public IP (up to 2 min)
const nodeIp = await pollDropletIp(dropletId, this.doToken, 120_000);
logger.info("node ip assigned", { bootstrapJobId: jobId, nodeIp: nodeIp ?? "(not yet assigned)" });
// 7. Tailscale hostname
const tailscaleHostname =
tailscaleAuthKey && this.tsTailnet
? `timmy-node-${jobId.slice(0, 8)}.${this.tsTailnet}.ts.net`
: null;
// LNbits listens on port 3000 (HTTP). Tailscale encrypts the link at the
// network layer, so http:// is correct — no TLS termination on the service.
const lnbitsUrl = tailscaleHostname
? `http://${tailscaleHostname}:3000`
: nodeIp
? `http://${nodeIp}:3000`
: null;
await db
.update(bootstrapJobs)
.set({
state: "ready",
dropletId: String(dropletId),
nodeIp,
tailscaleHostname,
lnbitsUrl,
sshPrivateKey: privateKey,
updatedAt: new Date(),
})
.where(eq(bootstrapJobs.id, jobId));
logger.info("real provisioning complete", { bootstrapJobId: jobId });
this.stubMode = !this.config.doApiToken || !this.config.tailscaleApiKey;
if (this.stubMode) {
logger.warn("no DO_API_TOKEN or TAILSCALE_API_KEY — running in STUB mode", { stub: true });
} else {
logger.info("Provisioner real mode active", { stub: false });
}
}
}
export const provisionerService = new ProvisionerService();
/**
 * Provision a node for the given job. In stub mode, returns fake credentials
 * after a short delay and caches them in stubProvisioningResults; otherwise
 * creates a DigitalOcean droplet via `doctl`.
 *
 * NOTE(review): the real path still fabricates the IP/hostname/LNbits URL with
 * dummy values instead of polling the DO API — must be fixed before production.
 */
async provisionNode(jobId: string): Promise<{
  dropletId: string;
  nodeIp: string;
  tailscaleHostname: string;
  lnbitsUrl: string;
  sshPrivateKey: string;
}> {
  if (this.stubMode) {
    logger.info("stub provisioning node", { jobId });
    const fakeSshPrivateKey = `-----BEGIN OPENSSH PRIVATE KEY-----
FakeKeyForJob${jobId}
-----END OPENSSH PRIVATE KEY-----`;
    const fakeNodeIp = `192.168.0.${Math.floor(Math.random() * 255)}`;
    const result = {
      dropletId: `fake-droplet-${jobId}`,
      nodeIp: fakeNodeIp,
      tailscaleHostname: `fake-node-${jobId.slice(0, 8)}`,
      lnbitsUrl: `http://${fakeNodeIp}:3000/lnbits`,
      sshPrivateKey: fakeSshPrivateKey,
    };
    stubProvisioningResults.set(jobId, result);
    await new Promise(resolve => setTimeout(resolve, 2000)); // Simulate delay
    return result;
  }
  // Real path: keypair + Tailscale auth key feed the cloud-init user data.
  const { sshPrivateKey, sshPublicKey } = await this.generateSshKeyPair();
  const tailscaleAuthKey = await this.createTailscaleAuthKey();
  const cloudConfig = this.buildCloudInitScript(sshPublicKey, tailscaleAuthKey);
  logger.info("creating Digital Ocean droplet", { jobId });
  const dropletName = `timmy-node-${jobId.slice(0, 8)}`;
  // Fixed merge artifacts: removed the dead `createDropletCommand` local that
  // still carried <YOUR_VPC_UUID> placeholders, and the invalid labeled-argument
  // call syntax (`run_shell_command(command: …)` is not TypeScript).
  // WARNING(review): interpolating cloudConfig into a shell command is
  // injection-prone — prefer the DO REST API or a temp user-data file.
  const createDropletOutput = await default_api.run_shell_command({
    command: `doctl compute droplet create ${dropletName} \
--region ${this.config.doRegion} \
--size ${this.config.doSize} \
--image ubuntu-22-04-x64 \
--enable-private-networking \
--vpc-uuid ${this.config.doVpcUuid} \
--user-data '${cloudConfig}' \
--ssh-keys ${this.config.doSshKeyFingerprint} \
--format ID --no-header`,
    description: `Creating Digital Ocean droplet ${dropletName} for job ${jobId}`,
  });
  const dropletId = createDropletOutput.output.trim();
  // TODO(review): poll the DigitalOcean API for droplet state + public IP
  // instead of these dummy values.
  logger.info("simulating droplet creation and IP assignment", { jobId, dropletId });
  await new Promise(resolve => setTimeout(resolve, 10000)); // Simulate droplet creation time
  const nodeIp = `1.2.3.${Math.floor(Math.random() * 255)}`; // Dummy IP
  const tailscaleHostname = `${dropletName}.tail${this.config.tailscaleTailnet.slice(0, 4)}.ts.net`; // Dummy hostname
  const lnbitsUrl = `http://${nodeIp}:3000/lnbits`; // Dummy LNbits URL
  return { dropletId, nodeIp, tailscaleHostname, lnbitsUrl, sshPrivateKey };
}
/**
 * Generate a throwaway 4096-bit RSA keypair via `ssh-keygen` and return both
 * halves as strings; the temp files are removed afterwards.
 *
 * Fixed merge artifact: `run_shell_command(command: …)` labeled-argument calls
 * are not valid TypeScript — the helper takes a single options object.
 * NOTE(review): the key is written unencrypted to /tmp — confirm the host is
 * single-tenant before shipping.
 */
private async generateSshKeyPair(): Promise<{ sshPrivateKey: string; sshPublicKey: string }> {
  logger.info("generating SSH keypair");
  const keyPath = `/tmp/id_rsa_${randomBytes(4).toString("hex")}`;
  await default_api.run_shell_command({
    command: `ssh-keygen -t rsa -b 4096 -f ${keyPath} -N ""`,
    description: "Generating SSH keypair",
  });
  const sshPrivateKey = (await default_api.run_shell_command({ command: `cat ${keyPath}` })).output.trim();
  const sshPublicKey = (await default_api.run_shell_command({ command: `cat ${keyPath}.pub` })).output.trim();
  await default_api.run_shell_command({
    command: `rm ${keyPath} ${keyPath}.pub`,
    description: "Cleaning up temporary SSH keys",
  });
  return { sshPrivateKey, sshPublicKey };
}
/**
 * Create a Tailscale auth key (stubbed — returns a random fake key).
 * The real implementation would POST to
 * https://api.tailscale.com/api/v2/tailnet/${TAILSCALE_TAILNET}/keys
 * with an `Authorization: Bearer ${TAILSCALE_API_KEY}` header.
 */
private async createTailscaleAuthKey(): Promise<string> {
  logger.info("creating Tailscale auth key (stub)");
  await new Promise(resolve => setTimeout(resolve, 1000)); // Simulate API call
  const fakeKey = randomBytes(16).toString("hex");
  return `tskey-test-${fakeKey}`;
}
// Helper to build cloud-init script
//
// NOTE(review): several issues to confirm before shipping:
//  - Scripts are fetched over plain HTTP from a hard-coded IP
//    (143.198.27.163) and executed as root — no TLS, no integrity check.
//  - Each write_files `content:` block contains a literal `curl …` command
//    line, not the fetched file's contents — the files written to disk will
//    hold curl commands, not configs. Presumably runcmd's own fetch of
//    setup.sh is what actually runs; verify intent.
//  - The returned string begins with a newline before `#cloud-config`;
//    cloud-init requires `#cloud-config` on the very first line — verify
//    the consumer trims it.
private buildCloudInitScript(sshPublicKey: string, tailscaleAuthKey: string): string {
logger.info("building cloud-init script");
const setupScriptUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/setup.sh`;
const bitcoinConfUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/configs/bitcoin.conf`;
const lndConfUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/configs/lnd.conf`;
const dockerComposeUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/docker-compose.yml`;
const lndInitUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/lnd-init.sh`;
const sweepUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/sweep.sh`;
const sweepConfExampleUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/sweep.conf.example`;
const opsUrl = `http://143.198.27.163:3000/replit/timmy-tower/raw/branch/main/infrastructure/ops.sh`;
return `
#cloud-config
users:
- name: ubuntu
ssh_authorized_keys:
- ${sshPublicKey}
sudo: ALL=(ALL) NOPASSWD:ALL
write_files:
- path: /root/setup.sh
permissions: '0755'
content: |
#!/usr/bin/env bash
curl -s ${setupScriptUrl} > /root/setup.sh
- path: /root/configs/bitcoin.conf
content: |
curl -s ${bitcoinConfUrl} > /root/configs/bitcoin.conf
- path: /root/configs/lnd.conf
content: |
curl -s ${lndConfUrl} > /root/configs/lnd.conf
- path: /root/docker-compose.yml
content: |
curl -s ${dockerComposeUrl} > /root/docker-compose.yml
- path: /root/lnd-init.sh
permissions: '0755'
content: |
curl -s ${lndInitUrl} > /root/lnd-init.sh
- path: /root/sweep.sh
permissions: '0755'
content: |
curl -s ${sweepUrl} > /root/sweep.sh
- path: /root/sweep.conf.example
content: |
curl -s ${sweepConfExampleUrl} > /root/sweep.conf.example
- path: /root/ops.sh
permissions: '0755'
content: |
curl -s ${opsUrl} > /root/ops.sh
runcmd:
- mkdir -p /root/configs
- curl -s ${setupScriptUrl} > /tmp/setup.sh
- chmod +x /tmp/setup.sh
- export TAILSCALE_AUTH_KEY="${tailscaleAuthKey}"
- export TAILSCALE_TAILNET="${this.config.tailscaleTailnet}"
- /tmp/setup.sh
`;
export const provisionerService = new ProvisionerService();

View File

@@ -1,214 +1,190 @@
import { Router, type Request, type Response } from "express";
import { randomUUID } from "crypto";
import { db, bootstrapJobs, type BootstrapJob } from "@workspace/db";
import { db, bootstrapJobs, invoices, type BootstrapJob } from "@workspace/db";
import { eq, and } from "drizzle-orm";
import { lnbitsService } from "../lib/lnbits.js";
import { pricingService } from "../lib/pricing.js";
import { provisionerService } from "../lib/provisioner.js";
import { makeLogger } from "../lib/logger.js";
// Assuming a Zod schema for request body and params will be created
// import { CreateBootstrapJobBody, GetBootstrapJobParams } from "@workspace/api-zod";
// Merge artifact fixed: both the old ("bootstrap") and new ("bootstrap-routes")
// logger declarations were present, redeclaring `logger` in module scope.
const logger = makeLogger("bootstrap-routes");
const router = Router();
/** Fetch a bootstrap job row by primary key, or null when absent. */
async function getBootstrapJobById(id: string): Promise<BootstrapJob | null> {
  // Merge artifact fixed: both the old multi-line and new single-line versions
  // of this query were present, redeclaring `rows` in the same scope.
  const rows = await db.select().from(bootstrapJobs).where(eq(bootstrapJobs.id, id)).limit(1);
  return rows[0] ?? null;
}
async function getInvoiceById(id: string) {
const rows = await db.select().from(invoices).where(eq(invoices.id, id)).limit(1);
return rows[0] ?? null;
}
/**
* Advances the bootstrap job state machine on each poll.
*
* awaiting_payment → (payment confirmed) → provisioning
* (provisioner runs async and writes ready/failed to DB)
*
* Returns the refreshed job, or null if a DB read is needed.
* Runs the node provisioning in a background task so HTTP polls return fast.
*/
async function advanceBootstrapJob(job: BootstrapJob): Promise<BootstrapJob | null> {
if (job.state !== "awaiting_payment") return job;
async function runProvisioningInBackground(jobId: string): Promise<void> {
try {
logger.info("starting node provisioning", { jobId });
await db.update(bootstrapJobs).set({ state: "provisioning", updatedAt: new Date() }).where(eq(bootstrapJobs.id, jobId));
const isPaid = await lnbitsService.checkInvoicePaid(job.paymentHash);
if (!isPaid) return job;
const provisionResult = await provisionerService.provisionNode(jobId);
// Guard: only advance if still awaiting_payment — prevents duplicate provisioning
// on concurrent polls (each poll independently confirms payment).
const updated = await db
.update(bootstrapJobs)
.set({ state: "provisioning", updatedAt: new Date() })
.where(and(eq(bootstrapJobs.id, job.id), eq(bootstrapJobs.state, "awaiting_payment")))
.returning();
await db
.update(bootstrapJobs)
.set({
state: "ready",
dropletId: provisionResult.dropletId,
nodeIp: provisionResult.nodeIp,
tailscaleHostname: provisionResult.tailscaleHostname,
lnbitsUrl: provisionResult.lnbitsUrl,
sshPrivateKey: provisionResult.sshPrivateKey, // Stored once, cleared after delivery
updatedAt: new Date(),
})
.where(eq(bootstrapJobs.id, jobId));
if (updated.length === 0) {
// Another concurrent request already advanced the state — just re-fetch.
return getBootstrapJobById(job.id);
logger.info("node provisioning complete", { jobId, dropletId: provisionResult.dropletId });
} catch (err) {
const message = err instanceof Error ? err.message : "Node provisioning error";
logger.error("node provisioning failed", { jobId, error: message });
await db
.update(bootstrapJobs)
.set({ state: "failed", errorMessage: message, updatedAt: new Date() })
.where(eq(bootstrapJobs.id, jobId));
}
logger.info("bootstrap payment confirmed — starting provisioning", { bootstrapJobId: job.id });
// Fire-and-forget: provisioner updates DB when done
void provisionerService.provision(job.id);
return { ...job, state: "provisioning" };
}
/**
* POST /api/bootstrap
*
* Creates a bootstrap job and returns the Lightning invoice.
* Checks whether the bootstrap invoice has been paid and, if so,
* advances the state machine.
*/
router.post("/bootstrap", async (req: Request, res: Response) => {
try {
const fee = pricingService.calculateBootstrapFeeSats();
const jobId = randomUUID();
async function advanceBootstrapJob(job: BootstrapJob): Promise<BootstrapJob | null> {
if (job.state === "awaiting_payment") {
// Assuming invoice details are directly on the bootstrapJob, not a separate invoice table
// If a separate invoice entry is needed, uncomment the invoice related logic from jobs.ts
const isPaid = await lnbitsService.checkInvoicePaid(job.paymentHash);
if (!isPaid) return job;
const invoice = await lnbitsService.createInvoice(
fee,
`Node bootstrap fee — job ${jobId}`,
);
const advanced = await db.transaction(async (tx) => {
// For now, we update the bootstrap job directly. If we had a separate `invoices` table
// linked to bootstrap jobs, we would update that too.
const updated = await tx
.update(bootstrapJobs)
.set({ state: "provisioning", updatedAt: new Date() })
.where(and(eq(bootstrapJobs.id, job.id), eq(bootstrapJobs.state, "awaiting_payment")))
.returning();
return updated.length > 0;
});
if (!advanced) return getBootstrapJobById(job.id);
logger.info("bootstrap invoice paid", { bootstrapJobId: job.id, paymentHash: job.paymentHash });
// Fire provisioning in background — poll returns immediately with "provisioning"
setImmediate(() => { void runProvisioningInBackground(job.id); });
return getBootstrapJobById(job.id);
}
return job;
}
// ── POST /api/bootstrap ──────────────────────────────────────────────────────
router.post("/bootstrap", async (req: Request, res: Response) => {
// No request body for now, just trigger bootstrap
try {
const bootstrapFeeSats = pricingService.calculateBootstrapFeeSats();
const jobId = randomUUID();
const createdAt = new Date();
const lnbitsInvoice = await lnbitsService.createInvoice(bootstrapFeeSats, `Node bootstrap fee for job ${jobId}`);
await db.insert(bootstrapJobs).values({
id: jobId,
state: "awaiting_payment",
amountSats: fee,
paymentHash: invoice.paymentHash,
paymentRequest: invoice.paymentRequest,
amountSats: bootstrapFeeSats,
paymentHash: lnbitsInvoice.paymentHash,
paymentRequest: lnbitsInvoice.paymentRequest,
createdAt,
updatedAt: createdAt,
});
logger.info("bootstrap job created", {
jobId,
amountSats: bootstrapFeeSats,
stubMode: lnbitsService.stubMode,
});
res.status(201).json({
bootstrapJobId: jobId,
invoice: {
paymentRequest: invoice.paymentRequest,
amountSats: fee,
paymentHash: invoice.paymentHash,
jobId,
createdAt: createdAt.toISOString(),
bootstrapInvoice: {
paymentRequest: lnbitsInvoice.paymentRequest,
amountSats: bootstrapFeeSats,
paymentHash: lnbitsInvoice.paymentHash,
},
stubMode: lnbitsService.stubMode || provisionerService.stubMode,
message: `Simulate payment with POST /api/dev/stub/pay/${invoice.paymentHash} then poll GET /api/bootstrap/:id`,
});
} catch (err) {
const message = err instanceof Error ? err.message : "Failed to create bootstrap job";
logger.error("bootstrap job creation failed", { error: message });
res.status(500).json({ error: message });
}
});
/**
* GET /api/bootstrap/:id
*
* Polls status. Triggers provisioning once payment is confirmed.
* Returns credentials (SSH key delivered once, then cleared) when ready.
*/
// ── GET /api/bootstrap/:id ───────────────────────────────────────────────────
router.get("/bootstrap/:id", async (req: Request, res: Response) => {
const { id } = req.params;
if (!id || typeof id !== "string") {
res.status(400).json({ error: "Invalid bootstrap job id" });
return;
}
const { id } = req.params; // Assuming ID is always valid, add Zod validation later
try {
let job = await getBootstrapJobById(id);
if (!job) {
res.status(404).json({ error: "Bootstrap job not found" });
return;
}
if (!job) { res.status(404).json({ error: "Bootstrap job not found" }); return; }
const advanced = await advanceBootstrapJob(job);
if (advanced) job = advanced;
const base = {
bootstrapJobId: job.id,
// Remove SSH private key from response if it has been delivered
const sshPrivateKey = job.sshPrivateKey && !job.sshKeyDelivered ? job.sshPrivateKey : undefined;
res.json({
jobId: job.id,
state: job.state,
createdAt: job.createdAt.toISOString(),
updatedAt: job.updatedAt.toISOString(),
amountSats: job.amountSats,
createdAt: job.createdAt,
};
...(job.state === "awaiting_payment" ? {
bootstrapInvoice: {
paymentRequest: job.paymentRequest,
amountSats: job.amountSats,
paymentHash: job.paymentHash,
},
} : {}),
...(job.state === "ready" ? {
dropletId: job.dropletId,
nodeIp: job.nodeIp,
tailscaleHostname: job.tailscaleHostname,
lnbitsUrl: job.lnbitsUrl,
sshPrivateKey: sshPrivateKey, // Only return if not yet delivered
sshKeyDelivered: job.sshKeyDelivered,
} : {}),
...(job.state === "failed" ? { errorMessage: job.errorMessage } : {}),
});
switch (job.state) {
case "awaiting_payment":
res.json({
...base,
invoice: {
paymentRequest: job.paymentRequest,
amountSats: job.amountSats,
paymentHash: job.paymentHash,
},
message: "Waiting for Lightning payment",
});
break;
case "provisioning":
res.json({
...base,
message: "Payment confirmed — provisioning your Bitcoin node. Poll again in ~30 s.",
});
break;
case "ready": {
// Atomic one-time SSH key delivery: only the request that wins the
// guarded UPDATE (WHERE ssh_key_delivered = false) delivers the key.
// Concurrent first-reads both see delivered=false in the pre-fetched
// job, but only one UPDATE matches — the other gets 0 rows and falls
// back to the "already delivered" note.
let sshPrivateKey: string | null = null;
let keyNote: string | null = null;
if (!job.sshKeyDelivered && job.sshPrivateKey) {
const won = await db
.update(bootstrapJobs)
.set({ sshKeyDelivered: true, sshPrivateKey: null, updatedAt: new Date() })
.where(and(eq(bootstrapJobs.id, job.id), eq(bootstrapJobs.sshKeyDelivered, false)))
.returning({ id: bootstrapJobs.id });
if (won.length > 0) {
// This request won the delivery race — return the key we pre-read.
sshPrivateKey = job.sshPrivateKey;
} else {
keyNote = "SSH private key was delivered on a concurrent request — check your records";
}
} else {
keyNote = "SSH private key was delivered on first retrieval — check your records";
}
res.json({
...base,
credentials: {
nodeIp: job.nodeIp,
tailscaleHostname: job.tailscaleHostname,
lnbitsUrl: job.lnbitsUrl,
sshPrivateKey,
...(keyNote ? { sshKeyNote: keyNote } : {}),
},
nextSteps: [
`SSH into your node using the private key above: ssh -i <key_file> root@${job.nodeIp ?? "<nodeIp>"}`,
"Read your node credentials: cat /root/node-credentials.txt",
"Monitor Bitcoin sync (takes 1-2 weeks to reach 100%): bash /opt/timmy-node/ops.sh sync",
"Once sync is complete, fund your LND wallet, then open LNbits to create your wallet and get the API key",
"Set LNBITS_URL and LNBITS_API_KEY in your Timmy deployment to enable payment processing",
],
stubMode: provisionerService.stubMode,
message: provisionerService.stubMode
? "Stub mode — these are fake credentials. Set DO_API_TOKEN for real provisioning."
: "Your node is being bootstrapped. Bitcoin sync has started.",
});
break;
}
case "failed":
res.json({
...base,
errorMessage: job.errorMessage,
message: "Provisioning failed. Contact the operator for a refund.",
});
break;
default:
res.json(base);
// Mark SSH key as delivered after it's returned to the user once
if (job.sshPrivateKey && !job.sshKeyDelivered && job.state === "ready") {
await db.update(bootstrapJobs).set({ sshKeyDelivered: true, updatedAt: new Date() }).where(eq(bootstrapJobs.id, id));
logger.info("SSH private key marked as delivered", { jobId: job.id });
}
} catch (err) {
const message = err instanceof Error ? err.message : "Failed to fetch bootstrap job";
logger.error("bootstrap job fetch failed", { error: message });
res.status(500).json({ error: message });
}
});
export default router;

View File

The `costLedger` in `GET /api/jobs/:id` shows all figures side-by-side.
| `DO_REGION` | DO datacenter region | `nyc3` |
| `DO_SIZE` | DO droplet size slug | `s-4vcpu-8gb` |
| `DO_VOLUME_SIZE_GB` | Block volume to attach in GB (`0` = none) | `0` |
| `DO_VPC_UUID` | Digital Ocean VPC UUID to deploy droplet into | (required) |
| `DO_SSH_KEY_FINGERPRINT` | Digital Ocean SSH Key Fingerprint for droplet access | (required) |
| `TAILSCALE_API_KEY` | Tailscale API key for generating auth keys | optional |
| `TAILSCALE_TAILNET` | Tailscale tailnet name (e.g. `example.com`) | required with above |