Refactor Windows audio capture setup to streamline processing and remove redundant logging
Some checks failed
Build and Release / build (x64, ubuntu-latest, linux) (push) Has been skipped
Build and Release / build (arm64, macos-latest, darwin) (push) Has been cancelled
Build and Release / build (x64, macos-latest, darwin) (push) Has been cancelled
Build and Release / build (x64, windows-latest, win32) (push) Has been cancelled
Build and Release / release (push) Has been cancelled

Илья Глазунов 2026-01-15 20:45:34 +03:00
parent 2f013b4751
commit 76d6fc2749
2 changed files with 27 additions and 76 deletions

View File

@@ -1,7 +1,7 @@
 {
     "name": "cheating-daddy",
     "productName": "cheating-daddy",
-    "version": "0.5.8",
+    "version": "0.5.9",
     "description": "cheating daddy",
     "main": "src/index.js",
     "scripts": {

View File

@@ -295,9 +295,6 @@ async function startCapture(screenshotIntervalSeconds = 5, imageQuality = 'mediu
             console.log('Linux capture started - system audio:', mediaStream.getAudioTracks().length > 0, 'microphone mode:', audioMode);
         } else {
             // Windows - use display media with loopback for system audio
-            logToMain('info', '=== Starting Windows audio capture ===');
-            cheatingDaddy.setStatus('Requesting screen & audio...');
-
             mediaStream = await navigator.mediaDevices.getDisplayMedia({
                 video: {
                     frameRate: 1,
@@ -313,6 +310,7 @@ async function startCapture(screenshotIntervalSeconds = 5, imageQuality = 'mediu
                 },
             });
+            console.log('Windows capture started with loopback audio');
 
             const audioTracks = mediaStream.getAudioTracks();
             const videoTracks = mediaStream.getVideoTracks();
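
Note: the hunks above show only part of the getDisplayMedia() call. A minimal sketch of the Windows capture request, assuming the elided constraints simply add an audio request next to the 1 fps video (the file's actual constraints may differ):

```js
// Sketch only: the constraints elided from this hunk may differ in the real file.
const mediaStream = await navigator.mediaDevices.getDisplayMedia({
    video: {
        frameRate: 1, // screenshots are taken on a timer, so 1 fps is enough
    },
    audio: true, // on Windows the loopback system-audio track rides on this stream
});
console.log('Windows capture started with loopback audio');
```

The added console.log mirrors the Linux branch above, which already reports capture status through console.log instead of logToMain.
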
@@ -328,14 +326,8 @@ async function startCapture(screenshotIntervalSeconds = 5, imageQuality = 'mediu
                 })),
             });
 
-            if (audioTracks.length === 0) {
-                logToMain('warn', 'WARNING: No audio tracks! User must check "Share audio" in screen picker dialog');
-                cheatingDaddy.setStatus('Warning: No audio - enable "Share audio" checkbox');
-            } else {
-                logToMain('info', 'Audio track acquired, setting up processing...');
-                // Setup audio processing for Windows loopback audio only
-                setupWindowsLoopbackProcessing();
-            }
+            // Setup audio processing for Windows loopback audio only
+            setupWindowsLoopbackProcessing();
 
             if (audioMode === 'mic_only' || audioMode === 'both') {
                 let micStream = null;
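
Note: the removed branch warned the user to tick "Share audio" in the screen-picker dialog; calling setupWindowsLoopbackProcessing() unconditionally suggests the audio track is now supplied by the main process rather than by that checkbox. That wiring is not part of this diff. A hypothetical sketch of how an Electron app typically attaches loopback audio on Windows (the API is Electron's setDisplayMediaRequestHandler; whether this repo uses it exactly this way is an assumption):

```js
// Hypothetical main-process setup (not shown in this commit), assuming Electron's
// display-media handler is used to attach loopback system audio on Windows.
const { app, session, desktopCapturer } = require('electron');

app.whenReady().then(() => {
    session.defaultSession.setDisplayMediaRequestHandler((request, callback) => {
        desktopCapturer.getSources({ types: ['screen'] }).then(sources => {
            // 'loopback' tells Electron to capture system audio output alongside the screen
            callback({ video: sources[0], audio: 'loopback' });
        });
    });
});
```
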
@@ -451,73 +443,32 @@ function setupLinuxSystemAudioProcessing()
 function setupWindowsLoopbackProcessing() {
     // Setup audio processing for Windows loopback audio only
-    logToMain('info', 'Setting up Windows loopback audio processing...');
+    audioContext = new AudioContext({ sampleRate: SAMPLE_RATE });
+    const source = audioContext.createMediaStreamSource(mediaStream);
+    audioProcessor = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);
-    try {
-        audioContext = new AudioContext({ sampleRate: SAMPLE_RATE });
+    let audioBuffer = [];
+    const samplesPerChunk = SAMPLE_RATE * AUDIO_CHUNK_DURATION;
-        logToMain('info', 'AudioContext created:', {
-            state: audioContext.state,
-            sampleRate: audioContext.sampleRate,
-        });
+    audioProcessor.onaudioprocess = async e => {
+        const inputData = e.inputBuffer.getChannelData(0);
+        audioBuffer.push(...inputData);
-        // Resume AudioContext if suspended (Chrome policy)
-        if (audioContext.state === 'suspended') {
-            logToMain('warn', 'AudioContext suspended, attempting resume...');
-            audioContext.resume().then(() => {
-                logToMain('info', 'AudioContext resumed successfully');
-            }).catch(err => {
-                logToMain('error', 'Failed to resume AudioContext:', err.message);
-            });
-        }
+        // Process audio in chunks
+        while (audioBuffer.length >= samplesPerChunk) {
+            const chunk = audioBuffer.splice(0, samplesPerChunk);
+            const pcmData16 = convertFloat32ToInt16(chunk);
+            const base64Data = arrayBufferToBase64(pcmData16.buffer);
+            await ipcRenderer.invoke('send-audio-content', {
+                data: base64Data,
+                mimeType: 'audio/pcm;rate=24000',
+            });
+        }
+    };
-        const source = audioContext.createMediaStreamSource(mediaStream);
-        audioProcessor = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);
-        let audioBuffer = [];
-        const samplesPerChunk = SAMPLE_RATE * AUDIO_CHUNK_DURATION;
-        let chunkCount = 0;
-        let totalSamples = 0;
-        audioProcessor.onaudioprocess = async e => {
-            const inputData = e.inputBuffer.getChannelData(0);
-            audioBuffer.push(...inputData);
-            totalSamples += inputData.length;
-            // Process audio in chunks
-            while (audioBuffer.length >= samplesPerChunk) {
-                const chunk = audioBuffer.splice(0, samplesPerChunk);
-                const pcmData16 = convertFloat32ToInt16(chunk);
-                const base64Data = arrayBufferToBase64(pcmData16.buffer);
-                await ipcRenderer.invoke('send-audio-content', {
-                    data: base64Data,
-                    mimeType: 'audio/pcm;rate=24000',
-                });
-                chunkCount++;
-                // Log progress every 100 chunks (~10 seconds)
-                if (chunkCount === 1) {
-                    logToMain('info', 'First audio chunk sent to AI');
-                    cheatingDaddy.setStatus('Listening...');
-                } else if (chunkCount % 100 === 0) {
-                    // Calculate max amplitude to check if we're getting real audio
-                    const maxAmp = Math.max(...chunk.map(Math.abs));
-                    logToMain('info', `Audio progress: ${chunkCount} chunks, maxAmplitude: ${maxAmp.toFixed(4)}`);
-                }
-            }
-        };
-        source.connect(audioProcessor);
-        audioProcessor.connect(audioContext.destination);
-        logToMain('info', 'Windows audio processing pipeline connected');
-    } catch (err) {
-        logToMain('error', 'Error setting up Windows audio:', err.message, err.stack);
-        cheatingDaddy.setStatus('Audio error: ' + err.message);
-    }
+    source.connect(audioProcessor);
+    audioProcessor.connect(audioContext.destination);
 }
 
 async function captureScreenshot(imageQuality = 'medium', isManual = false) {
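
Note: convertFloat32ToInt16() and arrayBufferToBase64() are referenced by the new chunking loop but defined elsewhere in the file. Typical shapes for such helpers, as an assumed sketch rather than the repo's actual code:

```js
// Assumed helper shapes; the real implementations live elsewhere in this file.
function convertFloat32ToInt16(float32Samples) {
    const int16 = new Int16Array(float32Samples.length);
    for (let i = 0; i < float32Samples.length; i++) {
        // Clamp to [-1, 1], then scale to the signed 16-bit range
        const s = Math.max(-1, Math.min(1, float32Samples[i]));
        int16[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
    }
    return int16;
}

function arrayBufferToBase64(buffer) {
    let binary = '';
    const bytes = new Uint8Array(buffer);
    for (let i = 0; i < bytes.length; i++) {
        binary += String.fromCharCode(bytes[i]);
    }
    return btoa(binary);
}
```

Given the 'audio/pcm;rate=24000' mime type, SAMPLE_RATE is presumably 24000, so each chunk holds 24000 * AUDIO_CHUNK_DURATION samples of mono 16-bit PCM before base64 encoding.
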