From 64d84ea44e4bc4d0035920afb28bfbf7339c5af5 Mon Sep 17 00:00:00 2001 From: tinyzimmer <38474291+tinyzimmer@users.noreply.github.com> Date: Thu, 27 Aug 2020 18:21:26 +0300 Subject: [PATCH 1/2] first draft audio support --- core/rfb.js | 84 ++++++++++++++++++++++++++++++++++++++++++++++ core/util/audio.js | 48 ++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) create mode 100644 core/util/audio.js diff --git a/core/rfb.js b/core/rfb.js index 876255ba2..411b3c414 100644 --- a/core/rfb.js +++ b/core/rfb.js @@ -13,6 +13,7 @@ import { encodeUTF8, decodeUTF8 } from './util/strings.js'; import { dragThreshold } from './util/browser.js'; import { clientToElement } from './util/element.js'; import { setCapture } from './util/events.js'; +import AudioBuffer from './util/audio.js'; import EventTargetMixin from './util/eventtarget.js'; import Display from "./display.js"; import Inflator from "./inflator.js"; @@ -503,6 +504,15 @@ export default class RFB extends EventTargetMixin { } } + enableAudio(sampleFormat, channels, frequency) { + RFB.messages.SetQEMUExtendedAudioFormat(this._sock, sampleFormat, channels, frequency); + RFB.messages.ToggleQEMUExtendedAudio(this._sock, true); + } + + disableAudio() { + RFB.messages.ToggleQEMUExtendedAudio(this._sock, false); + } + // ===== PRIVATE METHODS ===== _connect() { @@ -2048,6 +2058,25 @@ export default class RFB extends EventTargetMixin { return true; } + _handleQEMUExtAudioMsg() { + if (this._sock.rQwait("QEMU extended audio message", 3, 1)) { return false; } + + this._sock.rQshift8(); // for now there is only a single submessage type 1 + const operation = this._sock.rQshift16(); + + if (operation === 1) { // stream is starting + this._audioBuffer = new AudioBuffer('audio/webm; codecs="opus"'); // TODO: This is obviously not the right value to use here + } else if (operation === 0) { // stream is stopping + this._audioBuffer.close(); + } else { // stream data + const length = this._sock.rQshift32(); + const data = this._sock.rQshiftBytes(length); + this._audioBuffer.queueAudio(data); + } + + return true; + } + _handleXvpMsg() { if (this._sock.rQwait("XVP version and message", 3, 1)) { return false; } this._sock.rQskipBytes(1); // Padding @@ -2122,6 +2151,9 @@ export default class RFB extends EventTargetMixin { case 250: // XVP return this._handleXvpMsg(); + case 255: // Qemu extended audio message + return this._handleQEMUExtAudioMsg(); + default: this._fail("Unexpected server message (type " + msgType + ")"); Log.Debug("sock.rQslice(0, 30): " + this._sock.rQslice(0, 30)); @@ -2550,6 +2582,16 @@ export default class RFB extends EventTargetMixin { } } +// Audio sample formats +RFB.sampleFormats = { + U8: 0, + S8: 1, + U16: 2, + S16: 3, + U32: 4, + S32: 5 +}; + // Class Methods RFB.messages = { keyEvent(sock, keysym, down) { @@ -2571,6 +2613,48 @@ RFB.messages = { sock.flush(); }, + ToggleQEMUExtendedAudio(sock, enabled) { + const buff = sock._sQ; + const offset = sock._sQlen; + + buff[offset] = 255; // msg-type + buff[offset + 1] = 1; // sub msg-type + + buff[offset + 2] = 0; // operation + if (enabled) { + buff[offset + 3] = 0; + } else { + buff[offset + 3] = 1; + } + + sock._sQlen += 4; + sock.flush(); + }, + + SetQEMUExtendedAudioFormat(sock, sampleFormat, channels, frequency) { + const buff = sock._sQ; + const offset = sock._sQlen; + + buff[offset] = 255; // msg type + buff[offset + 1] = 1; // sub msg-type + + buff[offset + 2] = 0; // operation + buff[offset + 3] = 2; + + buff[offset + 4] = sampleFormat; + buff[offset + 5] = channels; + + 
const freq = toUnsigned32bit(frequency);
+
+        buff[offset + 6] = freq >> 24;
+        buff[offset + 7] = freq >> 16;
+        buff[offset + 8] = freq >> 8;
+        buff[offset + 9] = freq;
+
+        sock._sQlen += 10;
+        sock.flush();
+    },
+
     QEMUExtendedKeyEvent(sock, keysym, down, keycode) {
         function getRFBkeycode(xtScanCode) {
             const upperByte = (keycode >> 8);
diff --git a/core/util/audio.js b/core/util/audio.js
new file mode 100644
index 000000000..7979bd4e8
--- /dev/null
+++ b/core/util/audio.js
@@ -0,0 +1,48 @@
+/*
+ * noVNC: HTML5 VNC client
+ * Copyright (C) 2020 The noVNC Authors
+ * Licensed under MPL 2.0 (see LICENSE.txt)
+ *
+ * See README.md for usage and integration instructions.
+ */
+
+export default class AudioBuffer {
+    constructor(codec) {
+        this._codec = codec;
+        // instantiate a media source and audio buffer/queue
+        this._mediaSource = new MediaSource();
+        this._audioBuffer = null;
+        this._audioQ = [];
+
+        // create a hidden audio element
+        this._audio = document.createElement('audio');
+        this._audio.src = window.URL.createObjectURL(this._mediaSource);
+
+        // when data is queued, start playing
+        this._mediaSource.addEventListener('sourceopen', this._onSourceOpen, false);
+    }
+
+    _onSourceOpen(e) {
+        this._audio.play();
+        this._audioBuffer = this._mediaSource.addSourceBuffer(this._codec);
+        this._audioBuffer.addEventListener('update', this._onUpdateBuffer);
+    }
+
+    _onUpdateBuffer() {
+        if (this._audioQ.length > 0 && !this._audioBuffer.updating) {
+            this._audioBuffer.appendBuffer(this._audioQ.shift());
+        }
+    }
+
+    queueAudio(data) {
+        if (this._audioBuffer !== null) {
+            if (this._audioBuffer.updating || this._audioQ.length > 0) {
+                this._audioQ.push(data);
+            } else {
+                this._audioBuffer.appendBuffer(data);
+            }
+        }
+    }
+
+    close() {} // intentionally left empty as no cleanup seems necessary
+}
\ No newline at end of file

From 70c57e86d8821f7ef1f02ed70efcd27f38948c11 Mon Sep 17 00:00:00 2001
From: lhchavez
Date: Sat, 6 Feb 2021 17:28:12 +0000
Subject: [PATCH 2/2] Support the Repl.it Audio messages

This change:

a) Adds a button to toggle the audio stream. This is needed because most
   browsers only allow audio playback to start from inside a user event
   handler, so making the toggle an explicit button satisfies that
   requirement (and also avoids annoying users with unexpected sound).

b) Drops support for the QEMU audio extension in favor of the Repl.it
   Audio messages, which allow the audio compression settings to be
   negotiated explicitly.

c) Makes the audio library more robust; it now works in both Chrome and
   Firefox.
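
For reference, the audio messages (server message 245 and the matching
client messages) share one framing, reassembled here from the
RFB.messages.ReplitAudio* helpers and the handler below. The submessage
names are taken loosely from the code's comments and are descriptive,
not normative:

    U8  message-type     ; always 245
    U8  submessage-type  ; 0 = start/stop encoder, 1 = audio frame,
                         ; 2 = continuous updates
    U16 length           ; payload length in bytes
    U8[length] payload   ; client StartEncoder: U8 enabled, U8 channels,
                         ;     U16 codec, U16 kbps (big-endian)
                         ; server AudioFrame: U32 keyframe flag (high bit)
                         ;     + 31-bit timestamp in ms, then encoded data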
---
 app/images/audio.svg |  92 +++++++++++++++++++++
 app/ui.js            |  46 ++++++++++-
 core/encodings.js    |   1 +
 core/rfb.js          | 177 ++++++++++++++++++++++++++++------------
 core/util/audio.js   | 189 ++++++++++++++++++++++++++++++++++++++-----
 vnc.html             |   5 ++
 6 files changed, 437 insertions(+), 73 deletions(-)
 create mode 100644 app/images/audio.svg

diff --git a/app/images/audio.svg b/app/images/audio.svg
new file mode 100644
index 000000000..6f2c1831f
--- /dev/null
+++ b/app/images/audio.svg
@@ -0,0 +1,92 @@
+[92 lines of SVG markup for the new audio toggle icon; the XML was lost
+ in extraction and only the "image/svg+xml" metadata string survived]
diff --git a/app/ui.js b/app/ui.js
index 0b5bfd01a..0cc7619e2 100644
--- a/app/ui.js
+++ b/app/ui.js
@@ -224,6 +224,9 @@ const UI = {
         document.getElementById("noVNC_view_drag_button")
             .addEventListener('click', UI.toggleViewDrag);
 
+        document.getElementById("noVNC_audio_button")
+            .addEventListener('click', UI.toggleEnableAudio);
+
         document.getElementById("noVNC_control_bar_handle")
             .addEventListener('mousedown', UI.controlbarHandleMouseDown);
         document.getElementById("noVNC_control_bar_handle")
@@ -438,7 +441,7 @@
             UI.enableSetting('port');
             UI.enableSetting('path');
             UI.enableSetting('repeaterID');
-            UI.updatePowerButton();
+            UI.updateCapabilities();
             UI.keepControlbar();
         }
@@ -869,6 +872,24 @@
         }
     },
 
+    updateCapabilities() {
+        UI.updatePowerButton();
+        UI.updateAudioButton();
+    },
+
+    updateAudioButton() {
+        if (UI.connected &&
+            UI.rfb.capabilities.audio) {
+            document.getElementById('noVNC_audio_button')
+                .classList.remove("noVNC_hidden");
+            document.getElementById('noVNC_audio_button')
+                .classList.remove("noVNC_selected");
+        } else {
+            document.getElementById('noVNC_audio_button')
+                .classList.add("noVNC_hidden");
+        }
+    },
+
 /* ------^-------
  * /SETTINGS
  * ==============
@@ -1032,7 +1053,7 @@
         UI.rfb.addEventListener("disconnect", UI.disconnectFinished);
         UI.rfb.addEventListener("credentialsrequired", UI.credentials);
         UI.rfb.addEventListener("securityfailure", UI.securityFailed);
-        UI.rfb.addEventListener("capabilities", UI.updatePowerButton);
+        UI.rfb.addEventListener("capabilities", UI.updateCapabilities);
         UI.rfb.addEventListener("clipboard", UI.clipboardReceive);
         UI.rfb.addEventListener("bell", UI.bell);
         UI.rfb.addEventListener("desktopname", UI.updateDesktopName);
@@ -1647,6 +1668,27 @@
         }
     },
 
+    toggleEnableAudio() {
+        if (!UI.rfb) return;
+
+        if (!document.getElementById('noVNC_audio_button')
+            .classList.contains("noVNC_selected")) {
+            UI.rfb.enableAudio(
+                2,
+                MediaSource.isTypeSupported('audio/webm;codecs=opus') ?
+ RFB.audioCodecs.OpusWebM : + RFB.audioCodecs.MP3, + 32 * 1024 // 32kbps + ); + document.getElementById('noVNC_audio_button') + .classList.add("noVNC_selected"); + } else { + UI.rfb.disableAudio(); + document.getElementById('noVNC_audio_button') + .classList.remove("noVNC_selected"); + } + }, + updateShowDotCursor() { if (!UI.rfb) return; UI.rfb.showDotCursor = UI.getSetting('show_dot'); diff --git a/core/encodings.js b/core/encodings.js index 51c099291..dcd64f8cc 100644 --- a/core/encodings.js +++ b/core/encodings.js @@ -27,6 +27,7 @@ export const encodings = { pseudoEncodingContinuousUpdates: -313, pseudoEncodingCompressLevel9: -247, pseudoEncodingCompressLevel0: -256, + pseudoEncodingReplitAudio: 0x52706c41, pseudoEncodingVMwareCursor: 0x574d5664, pseudoEncodingExtendedClipboard: 0xc0a1e5ce }; diff --git a/core/rfb.js b/core/rfb.js index 411b3c414..2f02af845 100644 --- a/core/rfb.js +++ b/core/rfb.js @@ -13,7 +13,7 @@ import { encodeUTF8, decodeUTF8 } from './util/strings.js'; import { dragThreshold } from './util/browser.js'; import { clientToElement } from './util/element.js'; import { setCapture } from './util/events.js'; -import AudioBuffer from './util/audio.js'; +import AudioStream from './util/audio.js'; import EventTargetMixin from './util/eventtarget.js'; import Display from "./display.js"; import Inflator from "./inflator.js"; @@ -112,7 +112,7 @@ export default class RFB extends EventTargetMixin { this._fbName = ""; - this._capabilities = { power: false }; + this._capabilities = { power: false, audio: false }; this._supportsFence = false; @@ -124,6 +124,8 @@ export default class RFB extends EventTargetMixin { this._screenFlags = 0; this._qemuExtKeyEventSupported = false; + this._replitAudioSupported = false; + this._replitAudioServerVersion = -1; this._clipboardText = null; this._clipboardServerCapabilitiesActions = {}; @@ -169,6 +171,11 @@ export default class RFB extends EventTargetMixin { this._gestureLastMagnitudeX = 0; this._gestureLastMagnitudeY = 0; + // Audio state + this._audioEnabled = false; + this._audioMimeType = null; + this._audioStream = null; + // Bound event handlers this._eventHandlers = { focusCanvas: this._focusCanvas.bind(this), @@ -504,13 +511,23 @@ export default class RFB extends EventTargetMixin { } } - enableAudio(sampleFormat, channels, frequency) { - RFB.messages.SetQEMUExtendedAudioFormat(this._sock, sampleFormat, channels, frequency); - RFB.messages.ToggleQEMUExtendedAudio(this._sock, true); + enableAudio(channels, codec, kbps) { + if (this._audioEnabled) { return; } + + this._audioEnabled = true; + if (codec == RFB.audioCodecs.OpusWebM) { + this._audioMimeType = 'audio/webm;codecs=opus'; + } else if (codec == RFB.audioCodecs.MP3) { + this._audioMimeType = 'audio/mpeg'; + } + RFB.messages.ReplitAudioStartEncoder(this._sock, true, channels, codec, kbps); } disableAudio() { - RFB.messages.ToggleQEMUExtendedAudio(this._sock, false); + if (!this._audioEnabled) { return; } + + this._audioEnabled = false; + RFB.messages.ReplitAudioStartEncoder(this._sock, false, 0, 0, 0); } // ===== PRIVATE METHODS ===== @@ -1788,6 +1805,7 @@ export default class RFB extends EventTargetMixin { encs.push(encodings.pseudoEncodingDesktopSize); encs.push(encodings.pseudoEncodingLastRect); encs.push(encodings.pseudoEncodingQEMUExtendedKeyEvent); + encs.push(encodings.pseudoEncodingReplitAudio); encs.push(encodings.pseudoEncodingExtendedDesktopSize); encs.push(encodings.pseudoEncodingXvp); encs.push(encodings.pseudoEncodingFence); @@ -2058,20 +2076,49 @@ export default 
class RFB extends EventTargetMixin { return true; } - _handleQEMUExtAudioMsg() { - if (this._sock.rQwait("QEMU extended audio message", 3, 1)) { return false; } + _handleReplitAudioPseudoEncodingMsg() { + if (this._sock.rQwait("Repl.it audio message", 3, 1)) { return false; } + const submessage = this._sock.rQshift8(); + const length = this._sock.rQshift16(); + if (this._sock.rQwait("Repl.it audio message", length, 4)) { return false; } + + switch (submessage) { + case 0: { // StartCapture response. + const enabled = this._sock.rQshift8() == 1; + + if (enabled) { + this._audioStream = new AudioStream(this._audioMimeType); + RFB.messages.ReplitAudioEnableContinuousUpdate(this._sock); + } else if (this._audioStream != null) { + this._audioStream.close(); + this._audioStream = null; + } + break; + } + + case 1: { // AudioFrame response. + const keyframeAndTimestamp = this._sock.rQshift32(); + const keyframe = (keyframeAndTimestamp & 0x80000000) != 0; + const timestamp = keyframeAndTimestamp & 0x7fffffff; + const data = this._sock.rQshiftBytes(length - 4); + if (this._audioStream != null) { + this._audioStream.queueAudioFrame(timestamp / 1000, keyframe, data); + } + break; + } - this._sock.rQshift8(); // for now there is only a single submessage type 1 - const operation = this._sock.rQshift16(); + case 2: { // StartContinuousUpdates response. + const enabled = this._sock.rQshift8() == 1; + if (!enabled && this._audioStream != null) { + this._audioStream.close(); + this._audioStream = null; + } + break; + } - if (operation === 1) { // stream is starting - this._audioBuffer = new AudioBuffer('audio/webm; codecs="opus"'); // TODO: This is obviously not the right value to use here - } else if (operation === 0) { // stream is stopping - this._audioBuffer.close(); - } else { // stream data - const length = this._sock.rQshift32(); - const data = this._sock.rQshiftBytes(length); - this._audioBuffer.queueAudio(data); + default: + this._fail("Illegal server Repl.it audio message (msg: " + submessage + ")"); + break; } return true; @@ -2145,15 +2192,15 @@ export default class RFB extends EventTargetMixin { } return true; + case 245: // Repl.it audio message + return this._handleReplitAudioPseudoEncodingMsg(); + case 248: // ServerFence return this._handleServerFenceMsg(); case 250: // XVP return this._handleXvpMsg(); - case 255: // Qemu extended audio message - return this._handleQEMUExtAudioMsg(); - default: this._fail("Unexpected server message (type " + msgType + ")"); Log.Debug("sock.rQslice(0, 30): " + this._sock.rQslice(0, 30)); @@ -2227,6 +2274,9 @@ export default class RFB extends EventTargetMixin { this._qemuExtKeyEventSupported = true; return true; + case encodings.pseudoEncodingReplitAudio: + return this._handleReplitAudioPseudoEncoding(); + case encodings.pseudoEncodingDesktopName: return this._handleDesktopName(); @@ -2395,6 +2445,25 @@ export default class RFB extends EventTargetMixin { return true; } + _handleReplitAudioPseudoEncoding() { + if (this._sock.rQwait("Repl.it audio", 4)) { + return false; + } + + const version = this._sock.rQshift16(); + const codecs = this._sock.rQshift16(); + + if (this._sock.rQwait("Repl.it audio", 2 * codecs, 4)) { + return false; + } + this._sock.rQshiftStr(2 * codecs); + + this._replitAudioSupported = true; + this._replitAudioServerVersion = version; + this._setCapability("audio", true); + return true; + } + _handleDesktopName() { if (this._sock.rQwait("DesktopName", 4)) { return false; @@ -2582,14 +2651,10 @@ export default class RFB extends 
EventTargetMixin {
     }
 }
 
-// Audio sample formats
-RFB.sampleFormats = {
-    U8: 0,
-    S8: 1,
-    U16: 2,
-    S16: 3,
-    U32: 4,
-    S32: 5
+// Audio codecs
+RFB.audioCodecs = {
+    OpusWebM: 0,
+    MP3: 1
 };
 
 // Class Methods
@@ -2613,45 +2678,51 @@ RFB.messages = {
         sock.flush();
     },
 
-    ToggleQEMUExtendedAudio(sock, enabled) {
+    ReplitAudioStartEncoder(sock, enabled, channels, codec, kbps) {
         const buff = sock._sQ;
         const offset = sock._sQlen;
 
-        buff[offset] = 255; // msg-type
-        buff[offset + 1] = 1; // sub msg-type
+        buff[offset] = 245; // msg-type
+        buff[offset + 1] = 0; // sub msg-type
+        buff[offset + 2] = 0;
+        buff[offset + 3] = 6; // length
 
-        buff[offset + 2] = 0; // operation
-        if (enabled) {
-            buff[offset + 3] = 0;
-        } else {
-            buff[offset + 3] = 1;
-        }
+        buff[offset + 4] = enabled ? 1 : 0; // enabled
+        buff[offset + 5] = channels;
 
-        sock._sQlen += 4;
+        buff[offset + 6] = codec >> 8;
+        buff[offset + 7] = codec;
+
+        buff[offset + 8] = kbps >> 8;
+        buff[offset + 9] = kbps;
+
+        sock._sQlen += 10;
         sock.flush();
     },
 
-    SetQEMUExtendedAudioFormat(sock, sampleFormat, channels, frequency) {
+    ReplitAudioRequestFrame(sock) {
         const buff = sock._sQ;
         const offset = sock._sQlen;
 
-        buff[offset] = 255; // msg type
-        buff[offset + 1] = 1; // sub msg-type
-
-        buff[offset + 2] = 0; // operation
-        buff[offset + 3] = 2;
+        buff[offset] = 245; // msg-type
+        buff[offset + 1] = 1; // sub msg-type
+        buff[offset + 2] = 0;
+        buff[offset + 3] = 0; // length
 
-        buff[offset + 4] = sampleFormat;
-        buff[offset + 5] = channels;
+        sock._sQlen += 4;
+        sock.flush();
+    },
 
-        const freq = toUnsigned32bit(frequency);
+    ReplitAudioEnableContinuousUpdate(sock) {
+        const buff = sock._sQ;
+        const offset = sock._sQlen;
 
-        buff[offset + 6] = freq >> 24;
-        buff[offset + 7] = freq >> 16;
-        buff[offset + 8] = freq >> 8;
-        buff[offset + 9] = freq;
+        buff[offset] = 245; // msg-type
+        buff[offset + 1] = 2; // sub msg-type
+        buff[offset + 2] = 0;
+        buff[offset + 3] = 0; // length
 
-        sock._sQlen += 10;
+        sock._sQlen += 4;
         sock.flush();
     },
 
diff --git a/core/util/audio.js b/core/util/audio.js
index 7979bd4e8..dbfc40034 100644
--- a/core/util/audio.js
+++ b/core/util/audio.js
@@ -1,48 +1,201 @@
 /*
  * noVNC: HTML5 VNC client
- * Copyright (C) 2020 The noVNC Authors
+ * Copyright (C) 2021 The noVNC Authors
  * Licensed under MPL 2.0 (see LICENSE.txt)
  *
  * See README.md for usage and integration instructions.
  */
 
-export default class AudioBuffer {
+// The maximum allowable de-sync, in seconds. If the difference between the
+// last received timestamp and the current audio playback timestamp exceeds
+// this value, the audio stream is seeked to the most recent timestamp
+// possible.
+const MAX_ALLOWABLE_DESYNC = 0.5;
+
+// The amount of time, in seconds, to keep in the audio buffer while seeking.
+// Whenever a de-sync event happens and we need to seek to a future
+// timestamp, we skip to the last buffered time minus this amount, so that the
+// browser has this amount of time worth of buffered audio data. This is done
+// to avoid having the browser enter a buffering state just after seeking.
+const SEEK_BUFFER_LENGTH = 0.2;
+
+// An audio stream built upon Media Source Extensions.
+export default class AudioStream {
     constructor(codec) {
         this._codec = codec;
-        // instantiate a media source and audio buffer/queue
+        this._reset();
+    }
+
+    _reset() {
+        // Instantiate a media source and audio buffer/queue.
         this._mediaSource = new MediaSource();
         this._audioBuffer = null;
         this._audioQ = [];
 
-        // create a hidden audio element
-        this._audio = document.createElement('audio');
+        // Create a hidden audio element.
+        this._audio = document.createElement("audio");
         this._audio.src = window.URL.createObjectURL(this._mediaSource);
 
-        // when data is queued, start playing
-        this._mediaSource.addEventListener('sourceopen', this._onSourceOpen, false);
+        // When data is queued, start playing.
+        this._audio.autoplay = true;
+        this._mediaSource.addEventListener(
+            "sourceopen",
+            this._onSourceOpen.bind(this),
+            false
+        );
+        this._audio.addEventListener(
+            "error",
+            (ev) => {
+                console.error("Audio element error", ev);
+            },
+            false
+        );
+        this._audio.addEventListener("canplay", () => {
+            // play() returns a promise. Firefox and Chrome are happy to
+            // start playback the moment we ask, but Safari rejects the
+            // promise when play() was not triggered from a user event
+            // handler. The explicit audio toggle button is the gesture
+            // that makes playback possible, so the rejection is ignored.
+            const promise = this._audio.play();
+            if (promise !== undefined) {
+                promise.catch(() => {});
+            }
+        });
     }
 
     _onSourceOpen(e) {
-        this._audio.play();
+        if (this._audioBuffer) {
+            return;
+        }
         this._audioBuffer = this._mediaSource.addSourceBuffer(this._codec);
-        this._audioBuffer.addEventListener('update', this._onUpdateBuffer);
+        this._audioBuffer.mode = "segments";
+        this._audioBuffer.addEventListener(
+            "updateend",
+            this._onUpdateBuffer.bind(this)
+        );
+        this._audioBuffer.addEventListener("error", (ev) => {
+            console.error("AudioBuffer error", ev);
+        });
     }
 
     _onUpdateBuffer() {
-        if (this._audioQ.length > 0 && !this._audioBuffer.updating) {
-            this._audioBuffer.appendBuffer(this._audioQ.shift());
+        if (
+            !this._audioBuffer ||
+            this._audioBuffer.updating ||
+            this._audio.error
+        ) {
+            // The audio buffer is not yet ready to accept any new data.
+            return;
+        }
+        if (!this._audioQ.length) {
+            // There's nothing to append.
+            return;
+        }
+
+        const timestamp = this._audioQ[0][0];
+        if (this._audioQ.length === 1) {
+            this._appendChunk(timestamp, this._audioQ.pop()[1]);
+            return;
+        }
+
+        // If there is more than one chunk in the queue, coalesce them into
+        // a single buffer. Following an appendBuffer() call, the source
+        // buffer sits in an "updating" state for a short time, during which
+        // no new chunks can be appended. The internal queue is only used
+        // when the browser is catching up with the server, so coalescing
+        // minimizes the time the buffer spends in that unappendable state.
+        let chunkLength = 0;
+        for (let i = 0; i < this._audioQ.length; ++i) {
+            chunkLength += this._audioQ[i][1].byteLength;
+        }
+        const chunk = new Uint8Array(chunkLength);
+        let offset = 0;
+        for (let i = 0; i < this._audioQ.length; ++i) {
+            chunk.set(new Uint8Array(this._audioQ[i][1]), offset);
+            offset += this._audioQ[i][1].byteLength;
         }
+        this._audioQ.splice(0, this._audioQ.length);
+        this._appendChunk(timestamp, chunk);
     }
 
-    queueAudio(data) {
-        if (this._audioBuffer !== null) {
-            if (this._audioBuffer.updating || this._audioQ.length > 0) {
-                this._audioQ.push(data);
+    // Append a chunk into the AudioBuffer. The caller should ensure that
+    // the AudioBuffer is ready to receive the chunk. If the difference
+    // between the chunk's timestamp and the current playback position
+    // exceeds the maximum allowable desync threshold, the audio is seeked
+    // forward to the latest position that doesn't trigger buffering, so
+    // the desync between video and audio stays bounded.
+    _appendChunk(timestamp, chunk) {
+        this._audioBuffer.appendBuffer(chunk);
+        if (
+            timestamp - this._audio.currentTime > MAX_ALLOWABLE_DESYNC &&
+            (this._audio.seekable.length || this._audio.buffered.length)
+        ) {
+            console.debug("maximum allowable desync reached", {
+                readyState: this._audio.readyState,
+                buffered: (
+                    (this._audio.buffered &&
+                        this._audio.buffered.length &&
+                        this._audio.buffered.end(
+                            this._audio.buffered.length - 1
+                        )) ||
+                    0
+                ).toFixed(2),
+                seekable: (
+                    (this._audio.seekable &&
+                        this._audio.seekable.length &&
+                        this._audio.seekable.end(
+                            this._audio.seekable.length - 1
+                        )) ||
+                    0
+                ).toFixed(2),
+                time: this._audio.currentTime.toFixed(2),
+                delta: (timestamp - this._audio.currentTime).toFixed(2)
+            });
+            if (this._audio.buffered && this._audio.buffered.length) {
+                this._audio.currentTime =
+                    this._audio.buffered.end(this._audio.buffered.length - 1) -
+                    SEEK_BUFFER_LENGTH;
             } else {
-                this._audioBuffer.appendBuffer(data);
+                this._audio.currentTime =
+                    this._audio.seekable.end(this._audio.seekable.length - 1) -
+                    SEEK_BUFFER_LENGTH;
             }
         }
     }
 
-    close() {} // intentionally left empty as no cleanup seems necessary
-}
\ No newline at end of file
+    // Queues an audio chunk at a particular timestamp.
+    queueAudioFrame(timestamp, keyframe, chunk) {
+        // If the MSE audio buffer is not ready to receive the chunk, or
+        // there are other chunks waiting to be appended, save a copy of
+        // it into our own internal queue. Eventually, when the buffer
+        // becomes ready, all pending chunks are appended at once.
+        if (
+            this._audioBuffer === null ||
+            this._audioBuffer.updating ||
+            this._audio.error ||
+            this._audioQ.length
+        ) {
+            // We need to make a copy, since `chunk` is a view of the underlying
+            // buffer owned by Websock, and will be mutated once we return.
+            // TODO: `keyframe` can be used to decide when to drop a chunk if
+            // there's enough backpressure.
+            const copy = new ArrayBuffer(chunk.byteLength);
+            new Uint8Array(copy).set(new Uint8Array(chunk));
+            this._audioQ.push([timestamp, copy]);
+            this._onUpdateBuffer();
+            return;
+        }
+
+        this._appendChunk(timestamp, chunk);
+    }
+
+    close() {
+        if (this._audio) {
+            this._audio.pause();
+            // Release the blob URL created for the MediaSource, so that
+            // toggling audio repeatedly does not leak object URLs.
+            window.URL.revokeObjectURL(this._audio.src);
+        }
+        this._mediaSource = null;
+        this._audioBuffer = null;
+        this._audioQ = [];
+        this._audio = null;
+    }
+}
diff --git a/vnc.html b/vnc.html
index 7870b7c30..8e4416352 100644
--- a/vnc.html
+++ b/vnc.html
@@ -112,6 +112,11 @@

                 <h1 class="noVNC_logo" translate="no"><span>no</span><br>VNC</h1>
 
+                <!-- Audio -->
+                <input type="image" alt="Audio" src="app/images/audio.svg"
+                    id="noVNC_audio_button" class="noVNC_button noVNC_hidden"
+                    title="Toggle Audio"/>
+
                 <!-- Drag/Pan the viewport -->
                 <input type="image" alt="Drag" src="app/images/drag.svg"
                     id="noVNC_view_drag_button" class="noVNC_button noVNC_hidden"
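
--
Note for embedders (not part of the patch itself): a minimal sketch of
driving the new audio API from application code, modeled on
UI.toggleEnableAudio above. The element ids and WebSocket URL here are
illustrative only.

    import RFB from './core/rfb.js';

    const rfb = new RFB(document.getElementById('screen'),
                        'wss://example.com/websockify');

    // Browsers only let audio playback start from a user gesture, so the
    // call is wired to a click handler rather than made on connect.
    document.getElementById('audio_toggle').addEventListener('click', () => {
        // The capability flips to true once the server advertises the
        // Repl.it audio pseudo-encoding.
        if (!rfb.capabilities.audio) return;

        // Prefer Opus-in-WebM when the browser can demux it, else MP3.
        const codec = MediaSource.isTypeSupported('audio/webm;codecs=opus')
            ? RFB.audioCodecs.OpusWebM
            : RFB.audioCodecs.MP3;
        rfb.enableAudio(2, codec, 32 * 1024); // stereo at ~32 kbps
    });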