/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2020 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"

#if SDL_AUDIO_DRIVER_EMSCRIPTEN

#include "SDL_audio.h"
#include "../SDL_audio_c.h"
#include "SDL_emscriptenaudio.h"
#include "SDL_assert.h"

#include <emscripten/emscripten.h>

static void
FeedAudioDevice(_THIS, const void *buf, const int buflen)
{
    const int framelen = (SDL_AUDIO_BITSIZE(this->spec.format) / 8) * this->spec.channels;
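    /* framelen is the size in bytes of one interleaved sample frame (one
       sample for every channel). The EM_ASM block below de-interleaves the
       float samples at `buf` into each channel of the current Web Audio
       output buffer; $1 is the number of sample frames (buflen / framelen). */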
    EM_ASM_ARGS({
        var SDL2 = Module['SDL2'];
        var numChannels = SDL2.audio.currentOutputBuffer['numberOfChannels'];
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL2.audio.currentOutputBuffer['getChannelData'](c);
            if (channelData.length != $1) {
                throw 'Web Audio output buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            for (var j = 0; j < $1; ++j) {
                channelData[j] = HEAPF32[$0 + ((j*numChannels + c) << 2) >> 2]; /* !!! FIXME: why are these shifts here? */
            }
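                /* (most likely: $0 is a byte address and HEAPF32 is a
                   Float32Array view of the heap indexed in 4-byte floats, so
                   `<< 2` scales the interleaved sample index to a byte offset
                   and `>> 2` turns the byte address into a HEAPF32 index.) */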
            }
        }
    }, buf, buflen / framelen);
}

static void
HandleAudioProcess(_THIS)
{
    SDL_AudioCallback callback = this->callbackspec.callback;
    const int stream_len = this->callbackspec.size;

    /* Only do something if audio is enabled */
    if (!SDL_AtomicGet(&this->enabled) || SDL_AtomicGet(&this->paused)) {
        if (this->stream) {
            SDL_AudioStreamClear(this->stream);
        }
        return;
    }

    if (this->stream == NULL) { /* no conversion necessary. */
        SDL_assert(this->spec.size == stream_len);
        callback(this->callbackspec.userdata, this->work_buffer, stream_len);
    } else { /* streaming/converting */
        int got;
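        /* Keep asking the app for audio (in its own format) and pushing it
           through the conversion stream until at least one full device
           buffer (this->spec.size bytes) of converted data is available. */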
        while (SDL_AudioStreamAvailable(this->stream) < ((int) this->spec.size)) {
            callback(this->callbackspec.userdata, this->work_buffer, stream_len);
            if (SDL_AudioStreamPut(this->stream, this->work_buffer, stream_len) == -1) {
                SDL_AudioStreamClear(this->stream);
                SDL_AtomicSet(&this->enabled, 0);
                break;
            }
        }

        got = SDL_AudioStreamGet(this->stream, this->work_buffer, this->spec.size);
        SDL_assert((got < 0) || (got == this->spec.size));
        if (got != this->spec.size) {
            SDL_memset(this->work_buffer, this->spec.silence, this->spec.size);
        }
    }

    FeedAudioDevice(this, this->work_buffer, this->spec.size);
}

static void
HandleCaptureProcess(_THIS)
{
    SDL_AudioCallback callback = this->callbackspec.callback;
    const int stream_len = this->callbackspec.size;

    /* Only do something if audio is enabled */
    if (!SDL_AtomicGet(&this->enabled) || SDL_AtomicGet(&this->paused)) {
        SDL_AudioStreamClear(this->stream);
        return;
    }

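    /* Copy the current Web Audio capture buffer into work_buffer as
       interleaved float32 samples; $1 is the number of sample frames
       per channel. */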
    EM_ASM_ARGS({
        var SDL2 = Module['SDL2'];
        var numChannels = SDL2.capture.currentCaptureBuffer.numberOfChannels;
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL2.capture.currentCaptureBuffer.getChannelData(c);
            if (channelData.length != $1) {
                throw 'Web Audio capture buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            if (numChannels == 1) { /* fastpath this a little for the common (mono) case. */
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (j * 4), channelData[j], 'float');
                }
            } else {
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float');
                }
            }
        }
    }, this->work_buffer, (this->spec.size / sizeof (float)) / this->spec.channels);

    /* okay, we've got an interleaved float32 array in C now. */

    if (this->stream == NULL) { /* no conversion necessary. */
        SDL_assert(this->spec.size == stream_len);
        callback(this->callbackspec.userdata, this->work_buffer, stream_len);
    } else { /* streaming/converting */
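        /* Push the captured device-format data into the stream, then hand it
           to the app in chunks of the size it asked for (stream_len bytes). */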
        if (SDL_AudioStreamPut(this->stream, this->work_buffer, this->spec.size) == -1) {
            SDL_AtomicSet(&this->enabled, 0);
        }

        while (SDL_AudioStreamAvailable(this->stream) >= stream_len) {
            const int got = SDL_AudioStreamGet(this->stream, this->work_buffer, stream_len);
            SDL_assert((got < 0) || (got == stream_len));
            if (got != stream_len) {
                SDL_memset(this->work_buffer, this->callbackspec.silence, stream_len);
            }
            callback(this->callbackspec.userdata, this->work_buffer, stream_len); /* Send it to the app. */
        }
    }
}


static void
EMSCRIPTENAUDIO_CloseDevice(_THIS)
{
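    /* Tear down the JS-side audio graph. $0 is this->iscapture: non-zero
       tears down the capture nodes and media stream, zero tears down the
       playback node. The shared AudioContext is closed once neither side
       is using it anymore. */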
    EM_ASM_({
        var SDL2 = Module['SDL2'];
        if ($0) {
            if (SDL2.capture.silenceTimer !== undefined) {
                clearTimeout(SDL2.capture.silenceTimer);
            }
            if (SDL2.capture.stream !== undefined) {
                var tracks = SDL2.capture.stream.getAudioTracks();
                for (var i = 0; i < tracks.length; i++) {
                    SDL2.capture.stream.removeTrack(tracks[i]);
                }
                SDL2.capture.stream = undefined;
            }
            if (SDL2.capture.scriptProcessorNode !== undefined) {
                SDL2.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
                SDL2.capture.scriptProcessorNode.disconnect();
                SDL2.capture.scriptProcessorNode = undefined;
            }
            if (SDL2.capture.mediaStreamNode !== undefined) {
                SDL2.capture.mediaStreamNode.disconnect();
                SDL2.capture.mediaStreamNode = undefined;
            }
            if (SDL2.capture.silenceBuffer !== undefined) {
                SDL2.capture.silenceBuffer = undefined;
            }
            SDL2.capture = undefined;
        } else {
            if (SDL2.audio.scriptProcessorNode !== undefined) {
                SDL2.audio.scriptProcessorNode.disconnect();
                SDL2.audio.scriptProcessorNode = undefined;
            }
            SDL2.audio = undefined;
        }
        if ((SDL2.audioContext !== undefined) && (SDL2.audio === undefined) && (SDL2.capture === undefined)) {
            SDL2.audioContext.close();
            SDL2.audioContext = undefined;
        }
    }, this->iscapture);

#if 0 /* !!! FIXME: currently not used. Can we move some stuff off the SDL2 namespace? --ryan. */
    SDL_free(this->hidden);
#endif
}

static int
EMSCRIPTENAUDIO_OpenDevice(_THIS, void *handle, const char *devname, int iscapture)
{
    SDL_bool valid_format = SDL_FALSE;
    SDL_AudioFormat test_format;
    int result;

    /* based on parts of library_sdl.js */

    /* create context */
    result = EM_ASM_INT({
        if (typeof(Module['SDL2']) === 'undefined') {
            Module['SDL2'] = {};
        }
        var SDL2 = Module['SDL2'];
        if (!$0) {
            SDL2.audio = {};
        } else {
            SDL2.capture = {};
        }

        if (!SDL2.audioContext) {
            if (typeof(AudioContext) !== 'undefined') {
                SDL2.audioContext = new AudioContext();
            } else if (typeof(webkitAudioContext) !== 'undefined') {
                SDL2.audioContext = new webkitAudioContext();
            }
        }
        return SDL2.audioContext === undefined ? -1 : 0;
    }, iscapture);
    if (result < 0) {
        return SDL_SetError("Web Audio API is not available!");
    }

    test_format = SDL_FirstAudioFormat(this->spec.format);
    while ((!valid_format) && (test_format)) {
        switch (test_format) {
        case AUDIO_F32: /* web audio only supports floats */
            this->spec.format = test_format;

            valid_format = SDL_TRUE;
            break;
        }
        test_format = SDL_NextAudioFormat();
    }

    if (!valid_format) {
        /* Didn't find a compatible format :( */
        return SDL_SetError("No compatible audio format!");
    }

    /* Initialize all variables that we clean on shutdown */
#if 0 /* !!! FIXME: currently not used. Can we move some stuff off the SDL2 namespace? --ryan. */
    this->hidden = (struct SDL_PrivateAudioData *)
        SDL_malloc((sizeof *this->hidden));
    if (this->hidden == NULL) {
        return SDL_OutOfMemory();
    }
    SDL_zerop(this->hidden);
#endif
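    /* Just set a non-NULL marker; the real per-device state lives in the
       browser-side 'SDL2' object, not in this->hidden. */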
    this->hidden = (struct SDL_PrivateAudioData *)0x1;

    /* limit to native freq */
    this->spec.freq = EM_ASM_INT_V({
        var SDL2 = Module['SDL2'];
        return SDL2.audioContext.sampleRate;
    });

    SDL_CalculateAudioSpec(&this->spec);

    if (iscapture) {
        /* The idea is to take the capture media stream, hook it up to an
           audio graph where we can pass it through a ScriptProcessorNode
           to access the raw PCM samples and push them to the SDL app's
           callback. From there, we "process" the audio data into silence
           and forget about it. */

        /* This should, strictly speaking, use MediaRecorder for capture, but
           this API is cleaner to use and better supported, and fires a
           callback whenever there's enough data to fire down into the app.
           The downside is that we are spending CPU time silencing a buffer
           that the audiocontext uselessly mixes into any output. On the
           upside, both of those things are not only run in native code in
           the browser, they're probably SIMD code, too. MediaRecorder
           feels like it's a pretty inefficient tapdance in similar ways,
           to be honest. */

        EM_ASM_({
            var SDL2 = Module['SDL2'];
            var have_microphone = function(stream) {
                //console.log('SDL audio capture: we have a microphone! Replacing silence callback.');
                if (SDL2.capture.silenceTimer !== undefined) {
                    clearTimeout(SDL2.capture.silenceTimer);
                    SDL2.capture.silenceTimer = undefined;
                }
                SDL2.capture.mediaStreamNode = SDL2.audioContext.createMediaStreamSource(stream);
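                /* createScriptProcessor(bufferSize, inputChannels, outputChannels):
                   $1 sample frames per callback, $0 input channels, and a single
                   (silenced) output channel that we have to provide anyway. */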
                SDL2.capture.scriptProcessorNode = SDL2.audioContext.createScriptProcessor($1, $0, 1);
                SDL2.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
                    if ((SDL2 === undefined) || (SDL2.capture === undefined)) { return; }
                    audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
                    SDL2.capture.currentCaptureBuffer = audioProcessingEvent.inputBuffer;
                    dynCall('vi', $2, [$3]);
                };
                SDL2.capture.mediaStreamNode.connect(SDL2.capture.scriptProcessorNode);
                SDL2.capture.scriptProcessorNode.connect(SDL2.audioContext.destination);
                SDL2.capture.stream = stream;
            };

            var no_microphone = function(error) {
                //console.log('SDL audio capture: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
            };

            /* we write silence to the audio callback until the microphone is available (user approves use, etc). */
            SDL2.capture.silenceBuffer = SDL2.audioContext.createBuffer($0, $1, SDL2.audioContext.sampleRate);
            SDL2.capture.silenceBuffer.getChannelData(0).fill(0.0);
            var silence_callback = function() {
                SDL2.capture.currentCaptureBuffer = SDL2.capture.silenceBuffer;
                dynCall('vi', $2, [$3]);
            };

            SDL2.capture.silenceTimer = setTimeout(silence_callback, ($1 / SDL2.audioContext.sampleRate) * 1000);

            if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
                navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
            } else if (navigator.webkitGetUserMedia !== undefined) {
                navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
            }
        }, this->spec.channels, this->spec.samples, HandleCaptureProcess, this);
    } else {
        /* setup a ScriptProcessorNode */
        EM_ASM_ARGS({
            var SDL2 = Module['SDL2'];
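            /* createScriptProcessor(bufferSize, inputChannels, outputChannels):
               $1 sample frames per callback, no inputs, $0 output channels.
               onaudioprocess calls back into HandleAudioProcess ($2) with the
               SDL_AudioDevice pointer ($3) via dynCall. */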
            SDL2.audio.scriptProcessorNode = SDL2.audioContext['createScriptProcessor']($1, 0, $0);
            SDL2.audio.scriptProcessorNode['onaudioprocess'] = function (e) {
                if ((SDL2 === undefined) || (SDL2.audio === undefined)) { return; }
                SDL2.audio.currentOutputBuffer = e['outputBuffer'];
                dynCall('vi', $2, [$3]);
            };
            SDL2.audio.scriptProcessorNode['connect'](SDL2.audioContext['destination']);
        }, this->spec.channels, this->spec.samples, HandleAudioProcess, this);
    }

    return 0;
}

static int
EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl * impl)
{
    int available;
    int capture_available;

    /* Set the function pointers */
    impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice;
    impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice;

    impl->OnlyHasDefaultOutputDevice = 1;

    /* no threads here */
    impl->SkipMixerLock = 1;
    impl->ProvidesOwnCallbackThread = 1;

    /* check availability */
    available = EM_ASM_INT_V({
        if (typeof(AudioContext) !== 'undefined') {
            return 1;
        } else if (typeof(webkitAudioContext) !== 'undefined') {
            return 1;
        }
        return 0;
    });

    if (!available) {
        SDL_SetError("No audio context available");
    }

    capture_available = available && EM_ASM_INT_V({
        if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
            return 1;
        } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
            return 1;
        }
        return 0;
    });

    impl->HasCaptureSupport = capture_available ? SDL_TRUE : SDL_FALSE;
    impl->OnlyHasDefaultCaptureDevice = capture_available ? SDL_TRUE : SDL_FALSE;

    return available;
}

AudioBootStrap EMSCRIPTENAUDIO_bootstrap = {
    "emscripten", "SDL emscripten audio driver", EMSCRIPTENAUDIO_Init, 0
};

#endif /* SDL_AUDIO_DRIVER_EMSCRIPTEN */

/* vi: set ts=4 sw=4 expandtab: */