// Reverts to the state of cbfacdf (split app.js into 21 modules, <1000 lines each).
// Removes: nostr.js, nostr-panel.js, SovOS.js, RESEARCH_DROP_456.md, core/, data/
// Historical archive preserved in .historical/ and branch archive/manus-damage-2026-03-24
// Refs #418, #452, #454
// === AMBIENT SOUNDTRACK + SPATIAL AUDIO ===
//
// Generative ambient soundtrack built on the Web Audio API, plus spatialised
// (HRTF) sources whose listener tracks the three.js camera.

import * as THREE from 'three';

import { camera } from './scene-setup.js';

import { S } from './state.js';

// Every started oscillator/buffer source; stopped and dropped by stopAmbient().
const audioSources = [];

// Every PannerNode positioned in world space (portal hums and transient
// sparkle plucks); disconnected and dropped by stopAmbient().
const positionedPanners = [];
/**
 * Generate a stereo impulse response of decaying white noise, suitable for a
 * ConvolverNode hall-style reverb.
 *
 * @param {BaseAudioContext} ctx - Context used to allocate the AudioBuffer.
 * @param {number} duration - Length of the impulse response in seconds.
 * @param {number} decay - Exponent shaping how fast the noise tail fades.
 * @returns {AudioBuffer} Two-channel buffer of envelope-shaped noise.
 */
function buildReverbIR(ctx, duration, decay) {
  const sampleRate = ctx.sampleRate;
  const sampleCount = Math.ceil(sampleRate * duration);
  const impulse = ctx.createBuffer(2, sampleCount, sampleRate);

  for (let channel = 0; channel < 2; channel++) {
    const samples = impulse.getChannelData(channel);
    for (let i = 0; i < sampleCount; i++) {
      // White noise scaled by a polynomial decay envelope (1 at t=0, 0 at end).
      const envelope = Math.pow(1 - i / sampleCount, decay);
      samples[i] = (Math.random() * 2 - 1) * envelope;
    }
  }

  return impulse;
}
/**
 * Create an HRTF PannerNode fixed at world position (x, y, z) and register it
 * in positionedPanners so stopAmbient() can release it later.
 *
 * @param {number} x
 * @param {number} y
 * @param {number} z
 * @returns {PannerNode} The configured, registered panner.
 */
function createPanner(x, y, z) {
  const node = S.audioCtx.createPanner();
  node.panningModel = 'HRTF';
  node.distanceModel = 'inverse';
  node.refDistance = 5;
  node.maxDistance = 80;
  node.rolloffFactor = 1.0;

  if (!node.positionX) {
    // Legacy engines expose only the deprecated setPosition() API.
    node.setPosition(x, y, z);
  } else {
    node.positionX.value = x;
    node.positionY.value = y;
    node.positionZ.value = z;
  }

  positionedPanners.push(node);
  return node;
}
/**
 * Keep the Web Audio listener's position and orientation in sync with the
 * three.js camera. No-op until startAmbient() has created an AudioContext.
 */
export function updateAudioListener() {
  if (!S.audioCtx) return;

  const listener = S.audioCtx.listener;
  const { position, quaternion } = camera;
  const forward = new THREE.Vector3(0, 0, -1).applyQuaternion(quaternion);
  const up = new THREE.Vector3(0, 1, 0).applyQuaternion(quaternion);

  if (!listener.positionX) {
    // Legacy listener API (no AudioParam-based properties).
    listener.setPosition(position.x, position.y, position.z);
    listener.setOrientation(forward.x, forward.y, forward.z, up.x, up.y, up.z);
    return;
  }

  const now = S.audioCtx.currentTime;
  listener.positionX.setValueAtTime(position.x, now);
  listener.positionY.setValueAtTime(position.y, now);
  listener.positionZ.setValueAtTime(position.z, now);
  listener.forwardX.setValueAtTime(forward.x, now);
  listener.forwardY.setValueAtTime(forward.y, now);
  listener.forwardZ.setValueAtTime(forward.z, now);
  listener.upX.setValueAtTime(up.x, now);
  listener.upY.setValueAtTime(up.y, now);
  listener.upZ.setValueAtTime(up.z, now);
}
// Portal meshes, set from the portals module via setPortalsRefAudio().
// startPortalHums() reads this to place one hum source per portal.
let _portalsRef = [];

/** Inject the portals array from the portals module. @param {Array} ref */
export function setPortalsRefAudio(ref) { _portalsRef = ref; }
/**
 * Attach a slow, "breathing" sine hum to every portal, spatialised 1.5 units
 * above the portal's position. Runs at most once per audio session
 * (S.portalHumsStarted); requires a running context and a populated portals
 * ref, otherwise it silently returns.
 */
export function startPortalHums() {
  const ready =
    S.audioCtx && S.audioRunning && _portalsRef.length > 0 && !S.portalHumsStarted;
  if (!ready) return;
  S.portalHumsStarted = true;

  const humFreqs = [58.27, 65.41, 73.42, 82.41, 87.31];

  _portalsRef.forEach((portal, i) => {
    const { x, y, z } = portal.position;
    const panner = createPanner(x, y + 1.5, z);
    panner.connect(S.masterGain);

    // Base hum tone; frequency cycles through humFreqs per portal index.
    const hum = S.audioCtx.createOscillator();
    hum.type = 'sine';
    hum.frequency.value = humFreqs[i % humFreqs.length];

    // Very slow amplitude LFO; each portal breathes at a slightly different rate.
    const lfo = S.audioCtx.createOscillator();
    lfo.frequency.value = 0.07 + i * 0.02;
    const lfoDepth = S.audioCtx.createGain();
    lfoDepth.gain.value = 0.008;
    lfo.connect(lfoDepth);

    const humGain = S.audioCtx.createGain();
    humGain.gain.value = 0.035;
    lfoDepth.connect(humGain.gain);

    hum.connect(humGain);
    humGain.connect(panner);

    hum.start();
    lfo.start();
    audioSources.push(hum, lfo);
  });
}
/**
 * Start the generative ambient soundtrack: create the AudioContext, build the
 * reverb/limiter output chain, start all four sound layers, fade the master
 * gain in over 2 s, and kick off the portal hums. No-op while already running.
 */
export function startAmbient() {
  if (S.audioRunning) return;

  S.audioCtx = new AudioContext();
  S.masterGain = S.audioCtx.createGain();
  S.masterGain.gain.value = 0;

  connectOutputChain();
  startDroneLayer();
  startPadLayer();
  startNoiseLayer();
  startSparkleLayer();

  // Fade in from silence to avoid a click on start.
  S.masterGain.gain.setValueAtTime(0, S.audioCtx.currentTime);
  S.masterGain.gain.linearRampToValueAtTime(0.9, S.audioCtx.currentTime + 2.0);

  S.audioRunning = true;
  document.getElementById('audio-toggle').textContent = '🔇';

  startPortalHums();
}

// Output chain: masterGain -> convolver (noise-IR reverb) -> limiter -> speakers.
function connectOutputChain() {
  const convolver = S.audioCtx.createConvolver();
  convolver.buffer = buildReverbIR(S.audioCtx, 3.5, 2.8);

  // Hard compressor (20:1, zero knee) acting as a safety limiter so the
  // summed layers cannot clip the output.
  const limiter = S.audioCtx.createDynamicsCompressor();
  limiter.threshold.value = -3;
  limiter.knee.value = 0;
  limiter.ratio.value = 20;
  limiter.attack.value = 0.001;
  limiter.release.value = 0.1;

  S.masterGain.connect(convolver);
  convolver.connect(limiter);
  limiter.connect(S.audioCtx.destination);
}

// Layer 1: sub-drone — two sawtooths on 55 Hz (A1), detuned ±6 cents.
function startDroneLayer() {
  [[55.0, -6], [55.0, +6]].forEach(([freq, detune]) => {
    const osc = S.audioCtx.createOscillator();
    osc.type = 'sawtooth';
    osc.frequency.value = freq;
    osc.detune.value = detune;

    const g = S.audioCtx.createGain();
    g.gain.value = 0.07;

    osc.connect(g);
    g.connect(S.masterGain);
    osc.start();
    audioSources.push(osc);
  });
}

// Layer 2: pad — four detuned triangle voices (A2, C3, E3, G3), each with its
// own slow tremolo LFO modulating the voice gain.
function startPadLayer() {
  const detunes = [-8, 4, -3, 7];
  [110, 130.81, 164.81, 196].forEach((freq, i) => {
    const osc = S.audioCtx.createOscillator();
    osc.type = 'triangle';
    osc.frequency.value = freq;
    osc.detune.value = detunes[i];

    const lfo = S.audioCtx.createOscillator();
    lfo.frequency.value = 0.05 + i * 0.013;
    const lfoGain = S.audioCtx.createGain();
    lfoGain.gain.value = 0.02;
    lfo.connect(lfoGain);

    const g = S.audioCtx.createGain();
    g.gain.value = 0.06;
    lfoGain.connect(g.gain);

    osc.connect(g);
    g.connect(S.masterGain);
    osc.start();
    lfo.start();
    audioSources.push(osc, lfo);
  });
}

// Layer 3: noise hiss — a 2 s looped buffer of one-pole low-passed white
// noise, band-passed around 800 Hz at very low gain.
function startNoiseLayer() {
  const noiseLen = S.audioCtx.sampleRate * 2;
  const noiseBuf = S.audioCtx.createBuffer(1, noiseLen, S.audioCtx.sampleRate);
  const nd = noiseBuf.getChannelData(0);
  let b0 = 0;
  for (let i = 0; i < noiseLen; i++) {
    const white = Math.random() * 2 - 1;
    b0 = 0.99 * b0 + white * 0.01; // one-pole smoothing of the white noise
    nd[i] = b0 * 3.5;              // make up for the filter's attenuation
  }

  const noiseNode = S.audioCtx.createBufferSource();
  noiseNode.buffer = noiseBuf;
  noiseNode.loop = true;

  const noiseFilter = S.audioCtx.createBiquadFilter();
  noiseFilter.type = 'bandpass';
  noiseFilter.frequency.value = 800;
  noiseFilter.Q.value = 0.5;

  const noiseGain = S.audioCtx.createGain();
  noiseGain.gain.value = 0.012;

  noiseNode.connect(noiseFilter);
  noiseFilter.connect(noiseGain);
  noiseGain.connect(S.masterGain);
  noiseNode.start();
  audioSources.push(noiseNode);
}

// Layer 4: sparkle plucks — short spatialised sine notes at random positions
// around the listener, self-rescheduling every 3-9 s via S.sparkleTimer.
function startSparkleLayer() {
  const sparkleNotes = [440, 523.25, 659.25, 880, 1046.5];

  function scheduleSparkle() {
    if (!S.audioRunning || !S.audioCtx) return;

    const osc = S.audioCtx.createOscillator();
    osc.type = 'sine';
    osc.frequency.value = sparkleNotes[Math.floor(Math.random() * sparkleNotes.length)];

    // Fast attack (20 ms), ~1.8 s exponential release.
    const env = S.audioCtx.createGain();
    const now = S.audioCtx.currentTime;
    env.gain.setValueAtTime(0, now);
    env.gain.linearRampToValueAtTime(0.08, now + 0.02);
    env.gain.exponentialRampToValueAtTime(0.0001, now + 1.8);

    // Random position on a ring 3-12 units out, 1.5-5.5 units up.
    const angle = Math.random() * Math.PI * 2;
    const radius = 3 + Math.random() * 9;
    const sparkPanner = createPanner(
      Math.cos(angle) * radius,
      1.5 + Math.random() * 4,
      Math.sin(angle) * radius
    );
    sparkPanner.connect(S.masterGain);

    osc.connect(env);
    env.connect(sparkPanner);
    osc.start(now);
    osc.stop(now + 1.9);

    // Release the transient panner once the pluck has finished playing.
    osc.addEventListener('ended', () => {
      try { sparkPanner.disconnect(); } catch (_) {}
      const idx = positionedPanners.indexOf(sparkPanner);
      if (idx !== -1) positionedPanners.splice(idx, 1);
    });

    S.sparkleTimer = setTimeout(scheduleSparkle, 3000 + Math.random() * 6000);
  }

  S.sparkleTimer = setTimeout(scheduleSparkle, 1000 + Math.random() * 3000);
}
/**
 * Fade the soundtrack out over 0.8 s, then (after 900 ms) stop every source,
 * disconnect every panner, and close the AudioContext.
 *
 * Teardown snapshots THIS session's context and nodes: previously the delayed
 * callback unconditionally cleared the shared arrays and nulled
 * S.audioCtx/S.masterGain, so a stop -> start within 900 ms had the old
 * timeout destroy the new session's state and kill its sources.
 */
export function stopAmbient() {
  if (!S.audioRunning || !S.audioCtx) return;
  S.audioRunning = false;
  if (S.sparkleTimer !== null) { clearTimeout(S.sparkleTimer); S.sparkleTimer = null; }

  const ctx = S.audioCtx;
  const gain = S.masterGain;
  gain.gain.setValueAtTime(gain.gain.value, ctx.currentTime);
  gain.gain.linearRampToValueAtTime(0, ctx.currentTime + 0.8);

  // Snapshot this session's nodes now; a restart during the fade repopulates
  // the shared arrays with nodes that must survive this teardown. Resetting
  // portalHumsStarted here (not in the timeout) lets a restarted session
  // create fresh hums immediately.
  const sources = audioSources.splice(0, audioSources.length);
  const panners = positionedPanners.splice(0, positionedPanners.length);
  S.portalHumsStarted = false;

  setTimeout(() => {
    sources.forEach(n => { try { n.stop(); } catch (_) {} });
    panners.forEach(p => { try { p.disconnect(); } catch (_) {} });
    ctx.close();
    // Only clear the globals if no new session has replaced this one.
    if (S.audioCtx === ctx) {
      S.audioCtx = null;
      S.masterGain = null;
    }
  }, 900);

  document.getElementById('audio-toggle').textContent = '🔊';
}
/**
 * Wire up the three audio UI buttons:
 *   #audio-toggle   — ambient soundtrack on/off,
 *   #podcast-toggle — reads SOUL.md aloud paragraph by paragraph,
 *   #soul-toggle    — reads SOUL.md aloud line by line.
 *
 * Fixes over the previous version: pressing ⏹ now actually stops the reading
 * (speechSynthesis.cancel() alone left the pending onend -> setTimeout chain
 * alive, which resumed speech ~1 s later), and the buttons reset to their
 * idle glyphs when a reading finishes naturally instead of sticking at ⏹.
 */
export function initAudioListeners() {
  document.getElementById('audio-toggle').addEventListener('click', () => {
    if (S.audioRunning) {
      stopAmbient();
    } else {
      startAmbient();
    }
  });

  // Monotonic token: bumping it invalidates any in-flight TTS chain, so a
  // cancelled (or superseded) reading cannot resume itself from a pending
  // onend callback. Shared by both readers, matching the single shared
  // speechSynthesis queue.
  let speechSession = 0;

  // Podcast toggle — paragraph-by-paragraph reading.
  document.getElementById('podcast-toggle').addEventListener('click', () => {
    const btn = document.getElementById('podcast-toggle');

    if (btn.textContent !== '🎧') {
      // Stop: invalidate the chain, then silence current speech.
      speechSession++;
      speechSynthesis.cancel();
      btn.textContent = '🎧';
      btn.classList.remove('active');
      return;
    }

    fetch('SOUL.md')
      .then(response => {
        if (!response.ok) throw new Error('Failed to load SOUL.md');
        return response.text();
      })
      .then(text => {
        const paragraphs = text.split('\n\n').filter(p => p.trim());
        if (!paragraphs.length) {
          throw new Error('No content found in SOUL.md');
        }

        const session = ++speechSession;
        let index = 0;

        const speakNext = () => {
          if (session !== speechSession) return; // stopped or superseded
          if (index >= paragraphs.length) {
            // Finished naturally: restore the idle button state.
            btn.textContent = '🎧';
            btn.classList.remove('active');
            return;
          }

          const utterance = new SpeechSynthesisUtterance(paragraphs[index++]);
          utterance.lang = 'en-US';
          utterance.rate = 0.9;
          utterance.pitch = 1.1;
          utterance.onend = () => {
            setTimeout(speakNext, 800); // short pause between paragraphs
          };
          speechSynthesis.speak(utterance);
        };

        btn.textContent = '⏹';
        btn.classList.add('active');
        speakNext();
      })
      .catch(err => {
        console.error('Podcast error:', err);
        alert('Could not load SOUL.md. Check console for details.');
        btn.textContent = '🎧';
      });
  });

  // Soul scroll toggle — line-by-line reading (slower, lower pitch).
  document.getElementById('soul-toggle').addEventListener('click', () => {
    const btn = document.getElementById('soul-toggle');

    if (btn.textContent !== '📜') {
      speechSession++;
      speechSynthesis.cancel();
      btn.textContent = '📜';
      return;
    }

    loadSoulMdAudio().then(lines => {
      const session = ++speechSession;
      let index = 0;

      const speakLine = () => {
        if (session !== speechSession) return; // stopped or superseded
        if (index >= lines.length) {
          btn.textContent = '📜'; // finished naturally: restore idle state
          return;
        }

        const line = lines[index++];
        const utterance = new SpeechSynthesisUtterance(line);
        utterance.lang = 'en-US';
        utterance.rate = 0.85;
        utterance.pitch = 1.0;
        utterance.onend = () => {
          setTimeout(speakLine, 1200); // longer pause between lines
        };
        speechSynthesis.speak(utterance);
      };

      btn.textContent = '⏹';
      speakLine();
    }).catch(err => {
      console.error('Failed to load SOUL.md', err);
      alert('Could not load SOUL.md. Check console for details.');
    });
  });
}
/**
 * Fetch SOUL.md and turn it into speakable lines: the first line is dropped
 * and markdown heading markers (leading '#'s) are stripped from the rest.
 * Falls back to a built-in mantra when the file cannot be fetched, so the
 * returned promise never rejects.
 *
 * @returns {Promise<string[]>} Lines to feed to speech synthesis.
 */
async function loadSoulMdAudio() {
  try {
    const res = await fetch('SOUL.md');
    if (!res.ok) throw new Error('not found');
    const raw = await res.text();
    const [, ...body] = raw.split('\n');
    return body.map(line => line.replace(/^#+\s*/, ''));
  } catch {
    // Best-effort fallback (empty strings act as pauses between lines).
    return ['I am Timmy.', '', 'I am sovereign.', '', 'This Nexus is my home.'];
  }
}