From 76d6fc2749e531562e3e22b6cdfd5156a8717b20 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=98=D0=BB=D1=8C=D1=8F=20=D0=93=D0=BB=D0=B0=D0=B7=D1=83?= =?UTF-8?q?=D0=BD=D0=BE=D0=B2?=
Date: Thu, 15 Jan 2026 20:45:34 +0300
Subject: [PATCH] Refactor Windows audio capture setup to streamline processing and remove redundant logging

---
 package.json          |   2 +-
 src/utils/renderer.js | 101 +++++++++++------------------------------
 2 files changed, 27 insertions(+), 76 deletions(-)

diff --git a/package.json b/package.json
index a5e565b..383f713 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
     "name": "cheating-daddy",
     "productName": "cheating-daddy",
-    "version": "0.5.8",
+    "version": "0.5.9",
     "description": "cheating daddy",
     "main": "src/index.js",
     "scripts": {
diff --git a/src/utils/renderer.js b/src/utils/renderer.js
index f210348..27f8e1d 100644
--- a/src/utils/renderer.js
+++ b/src/utils/renderer.js
@@ -295,9 +295,6 @@ async function startCapture(screenshotIntervalSeconds = 5, imageQuality = 'mediu
             console.log('Linux capture started - system audio:', mediaStream.getAudioTracks().length > 0, 'microphone mode:', audioMode);
         } else {
             // Windows - use display media with loopback for system audio
-            logToMain('info', '=== Starting Windows audio capture ===');
-            cheatingDaddy.setStatus('Requesting screen & audio...');
-
             mediaStream = await navigator.mediaDevices.getDisplayMedia({
                 video: {
                     frameRate: 1,
@@ -313,6 +310,7 @@ async function startCapture(screenshotIntervalSeconds = 5, imageQuality = 'mediu
                 },
             });
 
+            console.log('Windows capture started with loopback audio');
             const audioTracks = mediaStream.getAudioTracks();
             const videoTracks = mediaStream.getVideoTracks();
 
@@ -328,14 +326,8 @@ async function startCapture(screenshotIntervalSeconds = 5, imageQuality = 'mediu
                 })),
             });
 
-            if (audioTracks.length === 0) {
-                logToMain('warn', 'WARNING: No audio tracks! User must check "Share audio" in screen picker dialog');
-                cheatingDaddy.setStatus('Warning: No audio - enable "Share audio" checkbox');
-            } else {
-                logToMain('info', 'Audio track acquired, setting up processing...');
-                // Setup audio processing for Windows loopback audio only
-                setupWindowsLoopbackProcessing();
-            }
+            // Setup audio processing for Windows loopback audio only
+            setupWindowsLoopbackProcessing();
 
             if (audioMode === 'mic_only' || audioMode === 'both') {
                 let micStream = null;
@@ -451,73 +443,32 @@ function setupLinuxSystemAudioProcessing() {
 
 function setupWindowsLoopbackProcessing() {
     // Setup audio processing for Windows loopback audio only
-    logToMain('info', 'Setting up Windows loopback audio processing...');
-
-    try {
-        audioContext = new AudioContext({ sampleRate: SAMPLE_RATE });
-
-        logToMain('info', 'AudioContext created:', {
-            state: audioContext.state,
-            sampleRate: audioContext.sampleRate,
-        });
-
-        // Resume AudioContext if suspended (Chrome policy)
-        if (audioContext.state === 'suspended') {
-            logToMain('warn', 'AudioContext suspended, attempting resume...');
-            audioContext.resume().then(() => {
-                logToMain('info', 'AudioContext resumed successfully');
-            }).catch(err => {
-                logToMain('error', 'Failed to resume AudioContext:', err.message);
+    audioContext = new AudioContext({ sampleRate: SAMPLE_RATE });
+    const source = audioContext.createMediaStreamSource(mediaStream);
+    audioProcessor = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);
+
+    let audioBuffer = [];
+    const samplesPerChunk = SAMPLE_RATE * AUDIO_CHUNK_DURATION;
+
+    audioProcessor.onaudioprocess = async e => {
+        const inputData = e.inputBuffer.getChannelData(0);
+        audioBuffer.push(...inputData);
+
+        // Process audio in chunks
+        while (audioBuffer.length >= samplesPerChunk) {
+            const chunk = audioBuffer.splice(0, samplesPerChunk);
+            const pcmData16 = convertFloat32ToInt16(chunk);
+            const base64Data = arrayBufferToBase64(pcmData16.buffer);
+
+            await ipcRenderer.invoke('send-audio-content', {
+                data: base64Data,
+                mimeType: 'audio/pcm;rate=24000',
             });
         }
-
-        const source = audioContext.createMediaStreamSource(mediaStream);
-        audioProcessor = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);
+    };
 
-        let audioBuffer = [];
-        const samplesPerChunk = SAMPLE_RATE * AUDIO_CHUNK_DURATION;
-        let chunkCount = 0;
-        let totalSamples = 0;
-
-        audioProcessor.onaudioprocess = async e => {
-            const inputData = e.inputBuffer.getChannelData(0);
-            audioBuffer.push(...inputData);
-            totalSamples += inputData.length;
-
-            // Process audio in chunks
-            while (audioBuffer.length >= samplesPerChunk) {
-                const chunk = audioBuffer.splice(0, samplesPerChunk);
-                const pcmData16 = convertFloat32ToInt16(chunk);
-                const base64Data = arrayBufferToBase64(pcmData16.buffer);
-
-                await ipcRenderer.invoke('send-audio-content', {
-                    data: base64Data,
-                    mimeType: 'audio/pcm;rate=24000',
-                });
-
-                chunkCount++;
-
-                // Log progress every 100 chunks (~10 seconds)
-                if (chunkCount === 1) {
-                    logToMain('info', 'First audio chunk sent to AI');
-                    cheatingDaddy.setStatus('Listening...');
-                } else if (chunkCount % 100 === 0) {
-                    // Calculate max amplitude to check if we're getting real audio
-                    const maxAmp = Math.max(...chunk.map(Math.abs));
-                    logToMain('info', `Audio progress: ${chunkCount} chunks, maxAmplitude: ${maxAmp.toFixed(4)}`);
-                }
-            }
-        };
-
-        source.connect(audioProcessor);
-        audioProcessor.connect(audioContext.destination);
-
-        logToMain('info', 'Windows audio processing pipeline connected');
-
-    } catch (err) {
-        logToMain('error', 'Error setting up Windows audio:', err.message, err.stack);
-        cheatingDaddy.setStatus('Audio error: ' + err.message);
-    }
+    source.connect(audioProcessor);
+    audioProcessor.connect(audioContext.destination);
 }
 
 async function captureScreenshot(imageQuality = 'medium', isManual = false) {
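
Note (not part of the patch): the rewritten setupWindowsLoopbackProcessing() relies on two helpers that already exist elsewhere in renderer.js, convertFloat32ToInt16() and arrayBufferToBase64(), which this diff does not show. The sketch below is a hypothetical version of those helpers for readers reviewing the hunk in isolation; the real implementations may differ.

    // Hypothetical sketch only - the actual helpers live elsewhere in renderer.js.
    // Clamp each float sample to [-1, 1] and scale it to signed 16-bit PCM.
    function convertFloat32ToInt16(samples) {
        const int16 = new Int16Array(samples.length);
        for (let i = 0; i < samples.length; i++) {
            const s = Math.max(-1, Math.min(1, samples[i]));
            int16[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
        }
        return int16;
    }

    // Base64-encode an ArrayBuffer so the PCM bytes can cross the IPC boundary as a string.
    function arrayBufferToBase64(buffer) {
        const bytes = new Uint8Array(buffer);
        let binary = '';
        for (let i = 0; i < bytes.length; i++) {
            binary += String.fromCharCode(bytes[i]);
        }
        return btoa(binary);
    }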
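Chunk sizing, for reference: if SAMPLE_RATE is 24000 (as the 'audio/pcm;rate=24000' mime type suggests) and AUDIO_CHUNK_DURATION is 0.1 s (implied by the removed "every 100 chunks (~10 seconds)" comment), each 'send-audio-content' message carries 2400 samples, i.e. 4800 bytes of 16-bit PCM, or roughly 6400 characters once base64-encoded. Both constants are assumptions read off the surrounding code, not values confirmed by this diff.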