Fix Task #5 review findings: race guard, full stack cloud-init, volume, node:crypto SSH
4 changes to address code review rejections:
1. Race condition fix (bootstrap.ts)
- advanceBootstrapJob: WHERE now guards on AND state='awaiting_payment'
- If UPDATE matches 0 rows, re-fetch current job (already advanced by
another concurrent poll) instead of firing a second provisioner
- Verified with 5-concurrent-poll test: only 1 "starting provisioning"
log entry per job; all 5 responses show consistent state
2. Complete cloud-init to full Bitcoin + LND + LNbits stack (provisioner.ts)
- Phase 1: packages, Docker, Tailscale, UFW, block volume mount
- Phase 2: Bitcoin Core started; polls for RPC availability (max 5 min)
- Phase 3: LND started; waits for REST API (max 6 min)
- Phase 4: non-interactive LND wallet init via REST:
POST /v1/genseed → POST /v1/initwallet with base64 password
(no lncli, no interactive prompts, no expect)
- Phase 5: waits for admin.macaroon to appear on mounted volume
- Phase 6: LNbits started with LndRestWallet backend; mounts LND
data dir so it reads tls.cert + admin.macaroon automatically
- Phase 7: saves all credentials (RPC pass, LND wallet pass + seed
mnemonic, LNbits URL) to chmod 600 /root/node-credentials.txt
3. DO block volume support (provisioner.ts)
- Reads the DO_VOLUME_SIZE_GB env var (default 0 = no volume)
- createVolume(): POST /v2/volumes (ext4 filesystem, tagged timmy-node)
- Passes volumeId in droplet create payload (attached at boot)
- Cloud-init Phase 1 detects and mounts the volume automatically
(lsblk scan → mkfs if unformatted → mount → /etc/fstab entry)
4. SSH keypair via node:crypto (no ssh-keygen) (provisioner.ts)
- generateKeyPairSync('rsa', { modulusLength: 4096 })
- Public key: PKCS#1 DER → OpenSSH wire format via manual DER parser
(pkcs1DerToSshPublicKey): reads SEQUENCE → n, e INTEGERs → ssh-rsa
base64 string with proper mpint encoding (leading 0x00 for high bit)
- Private key: PKCS#1 PEM (-----BEGIN RSA PRIVATE KEY-----)
- Both stub and real paths use the same generateSshKeypair() function
- Removes runtime dependency on host ssh-keygen binary entirely
This commit is contained in:
@@ -1,59 +1,112 @@
|
||||
import { execSync } from "child_process";
|
||||
import { mkdtempSync, readFileSync, rmSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
import path from "path";
|
||||
import { generateKeyPairSync } from "crypto";
|
||||
import { db, bootstrapJobs } from "@workspace/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
|
||||
const DO_API_BASE = "https://api.digitalocean.com/v2";
|
||||
const TS_API_BASE = "https://api.tailscale.com/api/v2";
|
||||
|
||||
function generateSshKeypair(): { privateKey: string; publicKey: string } {
|
||||
const tmpDir = mkdtempSync(path.join(tmpdir(), "timmy-bp-"));
|
||||
const keyPath = path.join(tmpDir, "id_rsa");
|
||||
try {
|
||||
execSync(
|
||||
`ssh-keygen -t rsa -b 4096 -N "" -C "timmy-bootstrap-node" -f "${keyPath}" -q`,
|
||||
{ stdio: "pipe" },
|
||||
);
|
||||
const privateKey = readFileSync(keyPath, "utf8");
|
||||
const publicKey = readFileSync(`${keyPath}.pub`, "utf8").trim();
|
||||
return { privateKey, publicKey };
|
||||
} finally {
|
||||
rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
// ── SSH keypair via node:crypto ───────────────────────────────────────────────
|
||||
|
||||
function uint32BE(n: number): Buffer {
|
||||
const b = Buffer.allocUnsafe(4);
|
||||
b.writeUInt32BE(n, 0);
|
||||
return b;
|
||||
}
|
||||
|
||||
function sshEncodeString(s: string): Buffer {
|
||||
const data = Buffer.from(s, "utf8");
|
||||
return Buffer.concat([uint32BE(data.length), data]);
|
||||
}
|
||||
|
||||
/** SSH mpint: prepend 0x00 if high bit set (indicates positive). */
|
||||
function sshEncodeMpint(data: Buffer): Buffer {
|
||||
if (data[0] & 0x80) data = Buffer.concat([Buffer.from([0x00]), data]);
|
||||
return Buffer.concat([uint32BE(data.length), data]);
|
||||
}
|
||||
|
||||
function derReadLength(buf: Buffer, offset: number): { len: number; offset: number } {
|
||||
if (!(buf[offset] & 0x80)) return { len: buf[offset], offset: offset + 1 };
|
||||
const nb = buf[offset] & 0x7f;
|
||||
let len = 0;
|
||||
for (let i = 0; i < nb; i++) len = (len << 8) | buf[offset + 1 + i];
|
||||
return { len, offset: offset + 1 + nb };
|
||||
}
|
||||
|
||||
function derReadInteger(buf: Buffer, offset: number): { value: Buffer; offset: number } {
|
||||
if (buf[offset] !== 0x02) throw new Error(`Expected DER INTEGER tag at ${offset}`);
|
||||
offset += 1;
|
||||
const { len, offset: dataStart } = derReadLength(buf, offset);
|
||||
return { value: buf.slice(dataStart, dataStart + len), offset: dataStart + len };
|
||||
}
|
||||
|
||||
/** Convert PKCS#1 DER RSA public key → OpenSSH wire format string. */
|
||||
function pkcs1DerToSshPublicKey(der: Buffer): string {
|
||||
// Structure: SEQUENCE { INTEGER(n), INTEGER(e) }
|
||||
let offset = 0;
|
||||
if (der[offset] !== 0x30) throw new Error("Expected DER SEQUENCE");
|
||||
offset += 1;
|
||||
const { offset: seqBody } = derReadLength(der, offset);
|
||||
offset = seqBody;
|
||||
|
||||
const { value: n, offset: o2 } = derReadInteger(der, offset);
|
||||
const { value: e } = derReadInteger(der, o2);
|
||||
|
||||
const payload = Buffer.concat([
|
||||
sshEncodeString("ssh-rsa"),
|
||||
sshEncodeMpint(e),
|
||||
sshEncodeMpint(n),
|
||||
]);
|
||||
return `ssh-rsa ${payload.toString("base64")} timmy-bootstrap-node`;
|
||||
}
|
||||
|
||||
interface SshKeypair {
|
||||
privateKey: string;
|
||||
publicKey: string;
|
||||
}
|
||||
|
||||
function generateSshKeypair(): SshKeypair {
|
||||
const { publicKey: pubDer, privateKey: privPem } = generateKeyPairSync("rsa", {
|
||||
modulusLength: 4096,
|
||||
publicKeyEncoding: { type: "pkcs1", format: "der" },
|
||||
privateKeyEncoding: { type: "pkcs1", format: "pem" },
|
||||
});
|
||||
return {
|
||||
privateKey: privPem as string,
|
||||
publicKey: pkcs1DerToSshPublicKey(pubDer as unknown as Buffer),
|
||||
};
|
||||
}
|
||||
|
||||
// ── Cloud-init script ─────────────────────────────────────────────────────────
|
||||
|
||||
function buildCloudInitScript(tailscaleAuthKey: string): string {
|
||||
const tsBlock = tailscaleAuthKey
|
||||
? `tailscale up --authkey="${tailscaleAuthKey}" --ssh --accept-routes`
|
||||
: "# No Tailscale auth key — skipping Tailscale join";
|
||||
: "# No Tailscale auth key — Tailscale not joined";
|
||||
|
||||
return `#!/bin/bash
|
||||
set -euo pipefail
|
||||
exec >> /var/log/timmy-bootstrap.log 2>&1
|
||||
echo "[timmy] Bootstrap started at $(date -u)"
|
||||
|
||||
echo "[timmy] Starting automated bootstrap at $(date -u)"
|
||||
|
||||
# System packages
|
||||
# ── 1. Packages ───────────────────────────────────────────────
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get update -qq
|
||||
apt-get install -y -qq curl wget git ufw jq openssl
|
||||
apt-get install -y -qq curl wget ufw jq openssl
|
||||
|
||||
# Docker
|
||||
# ── 2. Docker ─────────────────────────────────────────────────
|
||||
if ! command -v docker &>/dev/null; then
|
||||
curl -fsSL https://get.docker.com | sh
|
||||
systemctl enable docker
|
||||
systemctl start docker
|
||||
fi
|
||||
|
||||
# Tailscale
|
||||
# ── 3. Tailscale ──────────────────────────────────────────────
|
||||
if ! command -v tailscale &>/dev/null; then
|
||||
curl -fsSL https://tailscale.com/install.sh | sh
|
||||
fi
|
||||
${tsBlock}
|
||||
|
||||
# Firewall
|
||||
# ── 4. Firewall ───────────────────────────────────────────────
|
||||
ufw --force reset
|
||||
ufw allow in on tailscale0
|
||||
ufw allow 8333/tcp
|
||||
@@ -63,17 +116,35 @@ ufw default deny incoming
|
||||
ufw default allow outgoing
|
||||
ufw --force enable
|
||||
|
||||
# Directories
|
||||
# ── 5. Block volume ───────────────────────────────────────────
|
||||
mkdir -p /data
|
||||
VOLUME_DEV=$(lsblk -rno NAME,SIZE,MOUNTPOINT | awk '$3=="" && $2~/G/ {print $1}' | grep -vE "^(s|v)da$" | head -1 || true)
|
||||
if [[ -n "$VOLUME_DEV" ]]; then
|
||||
VOLUME_PATH="/dev/$VOLUME_DEV"
|
||||
if ! blkid "$VOLUME_PATH" &>/dev/null; then
|
||||
mkfs.ext4 -F "$VOLUME_PATH"
|
||||
fi
|
||||
mount "$VOLUME_PATH" /data
|
||||
BLKID=$(blkid -s UUID -o value "$VOLUME_PATH")
|
||||
grep -q "$BLKID" /etc/fstab || echo "UUID=$BLKID /data ext4 defaults,nofail 0 2" >> /etc/fstab
|
||||
echo "[timmy] Block volume mounted at /data ($VOLUME_PATH)"
|
||||
else
|
||||
echo "[timmy] No block volume — using /data on root disk"
|
||||
fi
|
||||
|
||||
# ── 6. Directories ────────────────────────────────────────────
|
||||
mkdir -p /data/bitcoin /data/lnd /data/lnbits /opt/timmy-node/configs
|
||||
|
||||
# RPC password
|
||||
# ── 7. Credentials ────────────────────────────────────────────
|
||||
RPC_PASS=$(openssl rand -hex 24)
|
||||
LND_WALLET_PASS=$(openssl rand -hex 16)
|
||||
echo "[timmy] Credentials generated"
|
||||
|
||||
# Bitcoin config
|
||||
# ── 8. Bitcoin config ─────────────────────────────────────────
|
||||
cat > /data/bitcoin/bitcoin.conf <<BTCCONF
|
||||
server=1
|
||||
rpcuser=satoshi
|
||||
rpcpassword=\$RPC_PASS
|
||||
rpcpassword=$RPC_PASS
|
||||
rpcallowip=172.16.0.0/12
|
||||
rpcbind=0.0.0.0
|
||||
txindex=1
|
||||
@@ -83,14 +154,41 @@ zmqpubrawtx=tcp://0.0.0.0:28333
|
||||
rpcport=8332
|
||||
BTCCONF
|
||||
|
||||
# Docker Compose
|
||||
# ── 9. LND config ─────────────────────────────────────────────
|
||||
cat > /opt/timmy-node/configs/lnd.conf <<LNDCONF
|
||||
[Application Options]
|
||||
alias=timmy-node
|
||||
listen=0.0.0.0:9735
|
||||
restlisten=0.0.0.0:8080
|
||||
rpclisten=0.0.0.0:10009
|
||||
noseedbackup=false
|
||||
|
||||
[Bitcoin]
|
||||
bitcoin.active=1
|
||||
bitcoin.mainnet=1
|
||||
bitcoin.node=bitcoind
|
||||
|
||||
[Bitcoind]
|
||||
bitcoind.rpchost=bitcoin:8332
|
||||
bitcoind.rpcuser=satoshi
|
||||
bitcoind.rpcpass=$RPC_PASS
|
||||
bitcoind.zmqpubrawblock=tcp://bitcoin:28332
|
||||
bitcoind.zmqpubrawtx=tcp://bitcoin:28333
|
||||
LNDCONF
|
||||
|
||||
# ── 10. Docker Compose ────────────────────────────────────────
|
||||
cat > /opt/timmy-node/docker-compose.yml <<COMPOSE
|
||||
version: "3.8"
|
||||
|
||||
networks:
|
||||
timmy: {}
|
||||
|
||||
services:
|
||||
bitcoin:
|
||||
image: bitcoinknots/bitcoin:29.3.knots20260210
|
||||
container_name: bitcoin
|
||||
restart: unless-stopped
|
||||
networks: [timmy]
|
||||
volumes:
|
||||
- /data/bitcoin:/home/bitcoin/.bitcoin
|
||||
ports:
|
||||
@@ -99,33 +197,142 @@ services:
|
||||
- "28332:28332"
|
||||
- "28333:28333"
|
||||
command: bitcoind -datadir=/home/bitcoin/.bitcoin -conf=/home/bitcoin/.bitcoin/bitcoin.conf
|
||||
|
||||
lnd:
|
||||
image: lightninglabs/lnd:v0.18.5-beta
|
||||
container_name: lnd
|
||||
restart: unless-stopped
|
||||
depends_on: [bitcoin]
|
||||
networks: [timmy]
|
||||
volumes:
|
||||
- /data/lnd:/root/.lnd
|
||||
- /opt/timmy-node/configs/lnd.conf:/root/.lnd/lnd.conf:ro
|
||||
ports:
|
||||
- "9735:9735"
|
||||
- "10009:10009"
|
||||
- "8080:8080"
|
||||
|
||||
lnbits:
|
||||
image: lnbitsdocker/lnbits:latest
|
||||
container_name: lnbits
|
||||
restart: unless-stopped
|
||||
depends_on: [lnd]
|
||||
networks: [timmy]
|
||||
volumes:
|
||||
- /data/lnbits:/app/data
|
||||
- /data/lnd:/lnd:ro
|
||||
environment:
|
||||
- LNBITS_DATA_FOLDER=/app/data
|
||||
- LNBITS_BACKEND_WALLET_CLASS=LndRestWallet
|
||||
- LND_REST_ENDPOINT=https://lnd:8080
|
||||
- LND_REST_CERT=/lnd/tls.cert
|
||||
- LND_REST_MACAROON_PATH=/lnd/data/chain/bitcoin/mainnet/admin.macaroon
|
||||
ports:
|
||||
- "3000:5000"
|
||||
COMPOSE
|
||||
|
||||
# Start Bitcoin Core
|
||||
# ── 11. Start Bitcoin ─────────────────────────────────────────
|
||||
cd /opt/timmy-node
|
||||
docker compose up -d bitcoin
|
||||
echo "[timmy] Bitcoin Core started"
|
||||
|
||||
# Save credentials
|
||||
echo "[timmy] Waiting for Bitcoin RPC..."
|
||||
for i in $(seq 1 60); do
|
||||
if docker exec bitcoin bitcoin-cli -datadir=/home/bitcoin/.bitcoin \
|
||||
-rpcuser=satoshi -rpcpassword=$RPC_PASS getblockchaininfo >/dev/null 2>&1; then
|
||||
echo "[timmy] Bitcoin RPC ready (${i}x5s)"
|
||||
break
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# ── 12. Start LND ─────────────────────────────────────────────
|
||||
docker compose up -d lnd
|
||||
echo "[timmy] LND started"
|
||||
|
||||
echo "[timmy] Waiting for LND REST API..."
|
||||
for i in $(seq 1 72); do
|
||||
if curl -sk https://localhost:8080/v1/state >/dev/null 2>&1; then
|
||||
echo "[timmy] LND REST ready (${i}x5s)"
|
||||
break
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# ── 13. Init LND wallet (non-interactive via REST) ────────────
|
||||
echo "[timmy] Generating LND wallet seed..."
|
||||
SEED_RESP=$(curl -sk https://localhost:8080/v1/genseed)
|
||||
SEED_JSON=$(echo "$SEED_RESP" | jq '.cipher_seed_mnemonic')
|
||||
SEED_WORDS=$(echo "$SEED_JSON" | jq -r 'join(" ")')
|
||||
PASS_B64=$(printf '%s' "$LND_WALLET_PASS" | base64 -w0)
|
||||
|
||||
echo "[timmy] Initializing LND wallet..."
|
||||
INIT_RESP=$(curl -sk -X POST https://localhost:8080/v1/initwallet \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"wallet_password\": \"$PASS_B64\", \"cipher_seed_mnemonic\": $SEED_JSON}")
|
||||
echo "[timmy] Wallet init: $(echo "$INIT_RESP" | jq -r 'if .admin_macaroon then "ok" else tostring end')"
|
||||
|
||||
echo "[timmy] Waiting for admin macaroon..."
|
||||
for i in $(seq 1 60); do
|
||||
if [[ -f /data/lnd/data/chain/bitcoin/mainnet/admin.macaroon ]]; then
|
||||
echo "[timmy] Admin macaroon ready (${i}x5s)"
|
||||
break
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# ── 14. Start LNbits ──────────────────────────────────────────
|
||||
docker compose up -d lnbits
|
||||
echo "[timmy] LNbits started"
|
||||
|
||||
echo "[timmy] Waiting for LNbits..."
|
||||
for i in $(seq 1 36); do
|
||||
if curl -s http://localhost:3000/health >/dev/null 2>&1; then
|
||||
echo "[timmy] LNbits ready (${i}x5s)"
|
||||
break
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# ── 15. Save credentials ──────────────────────────────────────
|
||||
NODE_IP=$(curl -4s https://ifconfig.me 2>/dev/null || echo "unknown")
|
||||
cat > /root/node-credentials.txt <<CREDS
|
||||
# Timmy Node Credentials — KEEP THIS FILE SAFE, NEVER SHARE IT
|
||||
# Generated: $(date -u)
|
||||
|
||||
## Bitcoin Core
|
||||
BITCOIN_RPC_USER=satoshi
|
||||
BITCOIN_RPC_PASS=\$RPC_PASS
|
||||
BITCOIN_RPC_PASS=$RPC_PASS
|
||||
|
||||
## LND
|
||||
LND_WALLET_PASS=$LND_WALLET_PASS
|
||||
LND_SEED_MNEMONIC=$SEED_WORDS
|
||||
|
||||
## LNbits
|
||||
LNBITS_URL=http://$NODE_IP:3000
|
||||
# To get your API key: open the URL above, create a wallet, copy the API key.
|
||||
# Then set LNBITS_URL and LNBITS_API_KEY secrets in your Timmy deployment.
|
||||
|
||||
## Node operations
|
||||
# Monitor Bitcoin sync: bash /opt/timmy-node/ops.sh sync
|
||||
# Initialize channels: bash /opt/timmy-node/ops.sh fund
|
||||
# Configure sweep: bash /opt/timmy-node/ops.sh configure-sweep
|
||||
CREDS
|
||||
chmod 600 /root/node-credentials.txt
|
||||
|
||||
echo "[timmy] Bootstrap complete at $(date -u). Bitcoin sync started (takes 1-2 weeks)."
|
||||
echo "[timmy] Next steps:"
|
||||
echo " 1. SSH to node, then run: bash /opt/timmy-node/lnd-init.sh"
|
||||
echo " 2. Monitor sync: bash /opt/timmy-node/ops.sh sync"
|
||||
echo "[timmy] Bootstrap complete at $(date -u)"
|
||||
echo "[timmy] Bitcoin sync in progress (1-2 weeks). Check: bash /opt/timmy-node/ops.sh sync"
|
||||
echo "[timmy] LNbits: http://$NODE_IP:3000"
|
||||
echo "[timmy] Credentials: cat /root/node-credentials.txt"
|
||||
`;
|
||||
}
|
||||
|
||||
// ── Digital Ocean helpers ─────────────────────────────────────────────────────
|
||||
|
||||
async function doPost<T>(endpoint: string, token: string, body: unknown): Promise<T> {
|
||||
const res = await fetch(`${DO_API_BASE}${endpoint}`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
headers: { Authorization: `Bearer ${token}`, "Content-Type": "application/json" },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
if (!res.ok) {
|
||||
@@ -146,11 +353,7 @@ async function doGet<T>(endpoint: string, token: string): Promise<T> {
|
||||
return res.json() as Promise<T>;
|
||||
}
|
||||
|
||||
async function pollDropletIp(
|
||||
dropletId: number,
|
||||
token: string,
|
||||
maxMs = 120_000,
|
||||
): Promise<string | null> {
|
||||
async function pollDropletIp(dropletId: number, token: string, maxMs = 120_000): Promise<string | null> {
|
||||
const deadline = Date.now() + maxMs;
|
||||
while (Date.now() < deadline) {
|
||||
await new Promise((r) => setTimeout(r, 5000));
|
||||
@@ -163,22 +366,33 @@ async function pollDropletIp(
|
||||
return null;
|
||||
}
|
||||
|
||||
async function createVolume(
|
||||
name: string,
|
||||
sizeGb: number,
|
||||
region: string,
|
||||
token: string,
|
||||
): Promise<string> {
|
||||
const data = await doPost<{ volume: { id: string } }>("/volumes", token, {
|
||||
name,
|
||||
size_gigabytes: sizeGb,
|
||||
region,
|
||||
filesystem_type: "ext4",
|
||||
description: "Timmy node data volume",
|
||||
tags: ["timmy-node"],
|
||||
});
|
||||
return data.volume.id;
|
||||
}
|
||||
|
||||
// ── Tailscale helper ──────────────────────────────────────────────────────────
|
||||
|
||||
async function getTailscaleAuthKey(apiKey: string, tailnet: string): Promise<string> {
|
||||
const res = await fetch(`${TS_API_BASE}/tailnet/${tailnet}/keys`, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${apiKey}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
headers: { Authorization: `Bearer ${apiKey}`, "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
capabilities: {
|
||||
devices: {
|
||||
create: {
|
||||
reusable: false,
|
||||
ephemeral: false,
|
||||
preauthorized: true,
|
||||
tags: ["tag:timmy-node"],
|
||||
},
|
||||
create: { reusable: false, ephemeral: false, preauthorized: true, tags: ["tag:timmy-node"] },
|
||||
},
|
||||
},
|
||||
expirySeconds: 86400,
|
||||
@@ -193,12 +407,14 @@ async function getTailscaleAuthKey(apiKey: string, tailnet: string): Promise<str
|
||||
return data.key;
|
||||
}
|
||||
|
||||
// ── ProvisionerService ────────────────────────────────────────────────────────
|
||||
|
||||
export class ProvisionerService {
|
||||
readonly stubMode: boolean;
|
||||
|
||||
private readonly doToken: string;
|
||||
private readonly doRegion: string;
|
||||
private readonly doSize: string;
|
||||
private readonly doVolumeGb: number;
|
||||
private readonly tsApiKey: string;
|
||||
private readonly tsTailnet: string;
|
||||
|
||||
@@ -206,6 +422,7 @@ export class ProvisionerService {
|
||||
this.doToken = process.env.DO_API_TOKEN ?? "";
|
||||
this.doRegion = process.env.DO_REGION ?? "nyc3";
|
||||
this.doSize = process.env.DO_SIZE ?? "s-4vcpu-8gb";
|
||||
this.doVolumeGb = parseInt(process.env.DO_VOLUME_SIZE_GB ?? "0", 10) || 0;
|
||||
this.tsApiKey = process.env.TAILSCALE_API_KEY ?? "";
|
||||
this.tsTailnet = process.env.TAILSCALE_TAILNET ?? "";
|
||||
this.stubMode = !this.doToken;
|
||||
@@ -218,7 +435,7 @@ export class ProvisionerService {
|
||||
|
||||
/**
|
||||
* Fire-and-forget: call without awaiting.
|
||||
* Updates bootstrap_jobs state to ready/failed when complete.
|
||||
* Updates bootstrap_jobs.state to ready/failed when complete.
|
||||
*/
|
||||
async provision(bootstrapJobId: string): Promise<void> {
|
||||
try {
|
||||
@@ -238,7 +455,7 @@ export class ProvisionerService {
|
||||
}
|
||||
|
||||
private async stubProvision(jobId: string): Promise<void> {
|
||||
console.log(`[stub] Simulating node provisioning for bootstrap job ${jobId}...`);
|
||||
console.log(`[stub] Simulating provisioning for bootstrap job ${jobId}...`);
|
||||
const { privateKey } = generateSshKeypair();
|
||||
await new Promise((r) => setTimeout(r, 2000));
|
||||
const fakeDropletId = String(Math.floor(Math.random() * 900_000_000 + 100_000_000));
|
||||
@@ -260,27 +477,38 @@ export class ProvisionerService {
|
||||
private async realProvision(jobId: string): Promise<void> {
|
||||
console.log(`[ProvisionerService] Provisioning real node for job ${jobId}...`);
|
||||
|
||||
// 1. SSH keypair (pure node:crypto)
|
||||
const { publicKey, privateKey } = generateSshKeypair();
|
||||
|
||||
// 2. Upload public key to DO
|
||||
const keyName = `timmy-bootstrap-${jobId.slice(0, 8)}`;
|
||||
const keyData = await doPost<{ ssh_key: { id: number } }>(
|
||||
"/account/keys",
|
||||
this.doToken,
|
||||
{ name: keyName, public_key: publicKey },
|
||||
);
|
||||
const keyData = await doPost<{ ssh_key: { id: number } }>("/account/keys", this.doToken, {
|
||||
name: keyName,
|
||||
public_key: publicKey,
|
||||
});
|
||||
const sshKeyId = keyData.ssh_key.id;
|
||||
|
||||
// 3. Tailscale auth key (optional)
|
||||
let tailscaleAuthKey = "";
|
||||
if (this.tsApiKey && this.tsTailnet) {
|
||||
try {
|
||||
tailscaleAuthKey = await getTailscaleAuthKey(this.tsApiKey, this.tsTailnet);
|
||||
} catch (err) {
|
||||
console.warn("[ProvisionerService] Tailscale auth key failed — skipping:", err);
|
||||
console.warn("[ProvisionerService] Tailscale key failed — continuing without:", err);
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Create block volume if configured
|
||||
let volumeId: string | null = null;
|
||||
if (this.doVolumeGb > 0) {
|
||||
const volName = `timmy-data-${jobId.slice(0, 8)}`;
|
||||
volumeId = await createVolume(volName, this.doVolumeGb, this.doRegion, this.doToken);
|
||||
console.log(`[ProvisionerService] Volume created: id=${volumeId} (${this.doVolumeGb} GB)`);
|
||||
}
|
||||
|
||||
// 5. Create droplet
|
||||
const userData = buildCloudInitScript(tailscaleAuthKey);
|
||||
const dropletData = await doPost<{ droplet: { id: number } }>("/droplets", this.doToken, {
|
||||
const dropletPayload: Record<string, unknown> = {
|
||||
name: `timmy-node-${jobId.slice(0, 8)}`,
|
||||
region: this.doRegion,
|
||||
size: this.doSize,
|
||||
@@ -288,13 +516,22 @@ export class ProvisionerService {
|
||||
ssh_keys: [sshKeyId],
|
||||
user_data: userData,
|
||||
tags: ["timmy-node"],
|
||||
});
|
||||
};
|
||||
if (volumeId) dropletPayload.volumes = [volumeId];
|
||||
|
||||
const dropletData = await doPost<{ droplet: { id: number } }>(
|
||||
"/droplets",
|
||||
this.doToken,
|
||||
dropletPayload,
|
||||
);
|
||||
const dropletId = dropletData.droplet.id;
|
||||
console.log(`[ProvisionerService] Droplet created: id=${dropletId}`);
|
||||
|
||||
// 6. Poll for public IP (up to 2 min)
|
||||
const nodeIp = await pollDropletIp(dropletId, this.doToken, 120_000);
|
||||
console.log(`[ProvisionerService] Droplet IP: ${nodeIp ?? "(not yet assigned)"}`);
|
||||
console.log(`[ProvisionerService] Node IP: ${nodeIp ?? "(not yet assigned)"}`);
|
||||
|
||||
// 7. Tailscale hostname
|
||||
const tailscaleHostname =
|
||||
tailscaleAuthKey && this.tsTailnet
|
||||
? `timmy-node-${jobId.slice(0, 8)}.${this.tsTailnet}.ts.net`
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { Router, type Request, type Response } from "express";
|
||||
import { randomUUID } from "crypto";
|
||||
import { db, bootstrapJobs, type BootstrapJob } from "@workspace/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import { eq, and } from "drizzle-orm";
|
||||
import { lnbitsService } from "../lib/lnbits.js";
|
||||
import { pricingService } from "../lib/pricing.js";
|
||||
import { provisionerService } from "../lib/provisioner.js";
|
||||
@@ -31,13 +31,18 @@ async function advanceBootstrapJob(job: BootstrapJob): Promise<BootstrapJob | nu
|
||||
const isPaid = await lnbitsService.checkInvoicePaid(job.paymentHash);
|
||||
if (!isPaid) return job;
|
||||
|
||||
// Guard: only advance if still awaiting_payment — prevents duplicate provisioning
|
||||
// on concurrent polls (each poll independently confirms payment).
|
||||
const updated = await db
|
||||
.update(bootstrapJobs)
|
||||
.set({ state: "provisioning", updatedAt: new Date() })
|
||||
.where(eq(bootstrapJobs.id, job.id))
|
||||
.where(and(eq(bootstrapJobs.id, job.id), eq(bootstrapJobs.state, "awaiting_payment")))
|
||||
.returning();
|
||||
|
||||
if (updated.length === 0) return getBootstrapJobById(job.id);
|
||||
if (updated.length === 0) {
|
||||
// Another concurrent request already advanced the state — just re-fetch.
|
||||
return getBootstrapJobById(job.id);
|
||||
}
|
||||
|
||||
console.log(`[bootstrap] Payment confirmed for ${job.id} — starting provisioning`);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user