<!DOCTYPE html>
<html>
<head>
<title>Web Audio API</title>
<meta charset=utf-8>
<script src='respec-w3c-common' async class='remove'></script>
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=MML_HTMLorMML"></script>
<script class='remove'>
var respecConfig = {
specStatus: "ED",
shortName: "webaudio",
edDraftURI: "http://webaudio.github.io/web-audio-api/",
editors: [
{ name: "Paul Adenot",
company: "Mozilla",
companyURL: "http://mozilla.org/",
mailto: "padenot@mozilla.com" },
{
name: "Chris Wilson",
company: "Google, Inc.",
companyURL: "http://google.com",
mailto: "cwilso@google.com" },
],
previousMaturity: "WD",
previousPublishDate: "2012-12-13",
previousURI: "http://www.w3.org/TR/2012/WD-webaudio-20121213/",
wg: "Audio Working Group",
wgURI: "http://www.w3.org/2011/audio/",
wgPublicList: "public-audio",
wgPatentURI: "http://www.w3.org/2004/01/pp-impl/46884/status",
tocIntroductory: true,
copyrightStart: 2013,
otherLinks: [
{
key: "Previous editor",
data : [{value: "Chris Rogers (Until August 2013)"}] },
{
key: "Repository",
href: "https://github.com/WebAudio/web-audio-api" },
{
key: "Bug tracker",
href: "https://github.com/WebAudio/web-audio-api/issues?state=open" },
]
};
</script>
<script>
// Editorial helper: highlight in red any link that has no href, or any
// fragment link whose target id does not exist in the document.
function findBadLink () {
var old = document.querySelectorAll(".badLink");
for (var i = 0 ; i < old.length; i++) {
old[i].style.backgroundColor = "";
old[i].classList.remove("badLink");
}
var nodes = document.querySelectorAll('a');
for (var i = 0; i < nodes.length; i++) {
if (nodes[i].href) {
var id = nodes[i].href.split("/");
id = id[id.length - 1];
if (id.length != 0) {
if (id[0] == "#") {
if (document.querySelectorAll(id).length == 0) {
nodes[i].style.backgroundColor = "red";
nodes[i].classList.add("badLink");
console.log(nodes[i].textContent);
}
}
}
} else {
nodes[i].style.backgroundColor = "red";
nodes[i].classList.add("badLink");
}
}
}
// Editorial helper: highlight code elements that are not wrapped in a link.
function findMissingLink() {
var codetags = document.querySelectorAll("code");
for (var i = 0; i < codetags.length; i++) {
if (!(codetags[i].parentNode instanceof HTMLAnchorElement) ||
codetags[i].parentNode.href == "") {
codetags[i].style.backgroundColor = 'hotpink';
codetags[i].style.color = 'yellow';
console.log(codetags[i].innerHTML);
}
}
}
</script>
</head>
<body>
<section id="abstract">
<p>This specification describes a high-level JavaScript <abbr
title="Application Programming Interface">API</abbr> for processing and
synthesizing audio in web applications. The primary paradigm is of an audio
routing graph, where a number of <a><code>AudioNode</code></a> objects are connected
together to define the overall audio rendering. The actual processing will
primarily take place in the underlying implementation (typically optimized
Assembly / C / C++ code), but <a href="#JavaScriptProcessing">direct
JavaScript processing and synthesis</a> is also supported. </p>
<p>The <a href="#introduction">introductory</a> section covers the motivation
behind this specification.</p>
<p>This API is designed to be used in conjunction with other APIs and elements
on the web platform, notably: XMLHttpRequest [[XHR]]
(using the <code>responseType</code> and <code>response</code> attributes). For
games and interactive applications, it is anticipated to be used with the
<code>canvas</code> 2D [[2dcontext]] and WebGL [[WEBGL]] 3D graphics APIs. </p>
</section>
<section id="sotd">
</section>
<section class="introductory">
<h2>Introduction</h2>
<section>
<p>Audio on the web has been fairly primitive up to this point and until very
recently has had to be delivered through plugins such as Flash and QuickTime.
The introduction of the <code>audio</code> element in HTML5 is very important,
allowing for basic streaming audio playback. But, it is not powerful enough to
handle more complex audio applications. For sophisticated web-based games or
interactive applications, another solution is required. It is a goal of this
specification to include the capabilities found in modern game audio engines as
well as some of the mixing, processing, and filtering tasks that are found in
modern desktop audio production applications. </p>
<p>The API has been designed with a wide variety of use cases in mind. Ideally, it should
be able to support <i>any</i> use case which could reasonably be implemented
with an optimized C++ engine controlled via JavaScript and run in a browser.
That said, modern desktop audio software can have very advanced capabilities,
some of which would be difficult or impossible to build with this system.
Apple's Logic Audio is one such application which has support for external MIDI
controllers, arbitrary plugin audio effects and synthesizers, highly optimized
direct-to-disk audio file reading/writing, tightly integrated time-stretching,
and so on. Nevertheless, the proposed system will be quite capable of
supporting a large range of reasonably complex games and interactive
applications, including musical ones. And it can be a very good complement to
the more advanced graphics features offered by WebGL. The API has been designed
so that more advanced capabilities can be added at a later time. </p>
</section>
<section>
<h2 id="Features">Features</h2>
<p>The API supports these primary features: </p>
<ul>
<li><a href="#ModularRouting">Modular routing</a> for simple or
complex mixing/effect architectures, including <a
href="#mixer-gain-structure">multiple sends and submixes</a>.</li>
<li> High dynamic range, using 32-bit floats for internal processing. </li>
<li><a href="#AudioParam">Sample-accurate scheduled sound
playback</a> with low <a href="#latency">latency</a> for musical
applications requiring a very high degree of rhythmic precision such as
drum machines and sequencers (a brief scheduling sketch follows this list).
This also includes the possibility of <a
href="#DynamicLifetime">dynamic creation</a> of effects. </li>
<li>Automation of audio parameters for envelopes, fade-ins / fade-outs,
granular effects, filter sweeps, LFOs etc. </li>
<li>Flexible handling of channels in an audio stream, allowing them to be split and merged.</li>
<li>Processing of audio sources from an <code>audio</code> or
<code>video</code> <a href="#MediaElementAudioSourceNode">media
element</a>. </li>
<li>Processing live audio input using a <a href="#MediaStreamAudioSourceNode">MediaStream</a>
from getUserMedia().
</li>
<li>Integration with WebRTC
<ul>
<li>Processing audio received from a remote peer using a
<a><code>MediaStreamAudioSourceNode</code></a> and [[!webrtc]].
</li>
<li>Sending a generated or processed audio stream to a remote peer using a
<a><code>MediaStreamAudioDestinationNode</code></a> and [[!webrtc]].
</li>
</ul>
</li>
<li>Audio stream synthesis and processing <a
href="#JavaScriptProcessing">directly in JavaScript</a>. </li>
<li><a href="#Spatialization">Spatialized audio</a> supporting a wide
range of 3D games and immersive environments:
<ul>
<li>Panning models: equal-power, HRTF, pass-through </li>
<li>Distance Attenuation </li>
<li>Sound Cones </li>
<li>Obstruction / Occlusion </li>
<li>Doppler Shift </li>
<li>Source / Listener based</li>
</ul>
</li>
<li>A <a href="#Convolution">convolution engine</a> for a wide range
of linear effects, especially very high-quality room effects. Here are some
examples of possible effects:
<ul>
<li>Small / large room </li>
<li>Cathedral </li>
<li>Concert hall </li>
<li>Cave </li>
<li>Tunnel </li>
<li>Hallway </li>
<li>Forest </li>
<li>Amphitheater </li>
<li>Sound of a distant room through a doorway </li>
<li>Extreme filters</li>
<li>Strange backwards effects</li>
<li>Extreme comb filter effects </li>
</ul>
</li>
<li>Dynamics compression for overall control and sweetening of the mix </li>
<li>Efficient <a href="#the-analysernode-interface">real-time time-domain and
frequency analysis / music visualizer support</a></li>
<li>Efficient biquad filters for lowpass, highpass, and other common filters.
</li>
<li>A Waveshaping effect for distortion and other non-linear effects</li>
<li>Oscillators</li>
</ul>
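<p>As an informal illustration of the sample-accurate scheduling feature above, the
following sketch schedules four beats of a pattern against
<code>AudioContext.currentTime</code>. The <code>kickBuffer</code> variable is a
placeholder for this example and is assumed to hold an already-decoded
<a><code>AudioBuffer</code></a>.</p>
<pre class="highlight example">
// Informative sketch: schedule four beats at 120 BPM, sample-accurately,
// relative to the context's current time. kickBuffer is assumed to be an
// already-decoded AudioBuffer.
var context = new AudioContext();
var secondsPerBeat = 60 / 120;
var startTime = context.currentTime + 0.1; // a little scheduling headroom

for (var beat = 0; beat &lt; 4; beat++) {
  var source = context.createBufferSource();
  source.buffer = kickBuffer;
  source.connect(context.destination);
  source.start(startTime + beat * secondsPerBeat);
}
</pre>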
<section>
<h2 id="ModularRouting">Modular Routing</h2>
<p>Modular routing allows arbitrary connections between different
<a><code>AudioNode</code></a>
objects. Each node can
have <dfn>inputs</dfn> and/or <dfn>outputs</dfn>. A <dfn>source node</dfn> has no inputs
and a single output. A <dfn>destination node</dfn> has
one input and no outputs, the most common example being <a
href="#AudioDestinationNode"><code>AudioDestinationNode</code></a> the final destination to the audio
hardware. Other nodes such as filters can be placed between the source and destination nodes.
The developer doesn't have to worry about low-level stream format details
when two objects are connected together; <a href="#channel-up-mixing-and-down-mixing">the right
thing just happens</a>. For example, if a mono audio stream is connected to a
stereo input it should just mix to left and right channels <a
href="#channel-up-mixing-and-down-mixing">appropriately</a>. </p>
<p>In the simplest case, a single source can be routed directly to the output.
All routing occurs within an <a
href="#AudioContext"><code>AudioContext</code></a> containing a single
<a href="#AudioDestinationNode"><code>AudioDestinationNode</code></a>:
</p>
<figure>
<img alt="modular routing" src="images/modular-routing1.png">
<figcaption>A simple example of modular routing.</figcaption>
</figure>
<p>Illustrating this simple routing, here's a simple example playing a single
sound: </p>
<pre class="highlight example">
var context = new AudioContext();
function playSound() {
var source = context.createBufferSource();
source.buffer = dogBarkingBuffer;
source.connect(context.destination);
source.start(0);
}
</pre>
<p>Here's a more complex example with three sources and a convolution reverb
send with a dynamics compressor at the final output stage: </p>
<figure>
<img alt="modular routing2" src="images/modular-routing2.png" />
<figcaption>A more complex example of modular routing.</figcaption>
</figure>
<pre class="highlight example">
var context = 0;
var compressor = 0;
var reverb = 0;
var source1 = 0;
var source2 = 0;
var source3 = 0;
var lowpassFilter = 0;
var waveShaper = 0;
var panner = 0;
var dry1 = 0;
var dry2 = 0;
var dry3 = 0;
var wet1 = 0;
var wet2 = 0;
var wet3 = 0;
var masterDry = 0;
var masterWet = 0;
function setupRoutingGraph () {
context = new AudioContext();
// Create the effects nodes.
lowpassFilter = context.createBiquadFilter();
waveShaper = context.createWaveShaper();
panner = context.createPanner();
compressor = context.createDynamicsCompressor();
reverb = context.createConvolver();
// Create master wet and dry.
masterDry = context.createGain();
masterWet = context.createGain();
// Connect final compressor to final destination.
compressor.connect(context.destination);
// Connect master dry and wet to compressor.
masterDry.connect(compressor);
masterWet.connect(compressor);
// Connect reverb to master wet.
reverb.connect(masterWet);
// Create a few sources.
source1 = context.createBufferSource();
source2 = context.createBufferSource();
source3 = context.createOscillator();
source1.buffer = manTalkingBuffer;
source2.buffer = footstepsBuffer;
source3.frequency.value = 440;
// Connect source1
dry1 = context.createGain();
wet1 = context.createGain();
source1.connect(lowpassFilter);
lowpassFilter.connect(dry1);
lowpassFilter.connect(wet1);
dry1.connect(masterDry);
wet1.connect(reverb);
// Connect source2
dry2 = context.createGain();
wet2 = context.createGain();
source2.connect(waveShaper);
waveShaper.connect(dry2);
waveShaper.connect(wet2);
dry2.connect(masterDry);
wet2.connect(reverb);
// Connect source3
dry3 = context.createGain();
wet3 = context.createGain();
source3.connect(panner);
panner.connect(dry3);
panner.connect(wet3);
dry3.connect(masterDry);
wet3.connect(reverb);
// Start the sources now.
source1.start(0);
source2.start(0);
source3.start(0);
}
</pre>
</section>
</section>
<section>
<h2 id="APIOverview">API Overview</h2>
<p>The interfaces defined are: </p>
<ul>
<li>An <a class="dfnref" href="#AudioContext">AudioContext</a>
interface, which contains an audio signal graph representing connections
between <a><code>AudioNode</code></a>s. </li>
<li>An <a><code>AudioNode</code></a> interface,
which represents audio sources, audio outputs, and intermediate processing
modules. <a><code>AudioNode</code></a>s can be dynamically connected together in a <a
href="#ModularRouting">modular fashion</a>. <a><code>AudioNode</code></a>s
exist in the context of an <a><code>AudioContext</code></a>. </li>
<li>An <a><code>AudioDestinationNode</code></a> interface, an
<a><code>AudioNode</code></a> subclass representing the final destination for all rendered
audio. </li>
<li>An <a><code>AudioBuffer</code></a>
interface, for working with memory-resident audio assets. These can
represent one-shot sounds, or longer audio clips. </li>
<li>An <a><code>AudioBufferSourceNode</code></a> interface,
an <a><code>AudioNode</code></a> which generates audio from an AudioBuffer. </li>
<li>A <a><code>MediaElementAudioSourceNode</code></a>
interface, an <a><code>AudioNode</code></a> which is the audio source from an
<code>audio</code>, <code>video</code>, or other media element. </li>
<li>A <a><code>MediaStreamAudioSourceNode</code></a>
interface, an <a><code>AudioNode</code></a> which is the audio source from a
MediaStream such as live audio input, or from a remote peer. </li>
<li>A <a><code>MediaStreamAudioDestinationNode</code></a>
interface, an <a><code>AudioNode</code></a> which is the audio destination to a
MediaStream sent to a remote peer. </li>
<li>An <a><code>AudioWorker</code></a> interface, a
<a><code>WebWorker</code></a> designed to enable processing audio directly in JavaScript in a Worker.</li>
<li>An <a><code>AudioWorkerNode</code></a> interface, an
<a><code>AudioNode</code></a> for connecting the node graph to an AudioWorker. </li>
<li>A <a><code>ScriptProcessorNode</code></a> interface, an
<a><code>AudioNode</code></a> for generating or processing audio directly in JavaScript. </li>
<li>An <a><code>AudioProcessingEvent</code></a> interface,
which is an event type used with <a><code>ScriptProcessorNode</code></a> objects.
</li>
<li>An <a><code>AudioParam</code></a> interface,
for controlling an individual aspect of an <a><code>AudioNode</code></a>'s functioning, such as
volume. </li>
<li>A <a><code>GainNode</code></a>
interface, for explicit gain control. Because inputs to
<a><code>AudioNode</code></a>s support
multiple connections (as a unity-gain summing junction), mixers can be <a
href="#mixer-gain-structure">easily built</a> with GainNodes (a small mixer sketch follows this list).
</li>
<li>A <a><code>BiquadFilterNode</code></a>
interface, an <a><code>AudioNode</code></a> for common low-order filters such as:
<ul>
<li>Low Pass</li>
<li>High Pass </li>
<li>Band Pass </li>
<li>Low Shelf </li>
<li>High Shelf </li>
<li>Peaking </li>
<li>Notch </li>
<li>Allpass </li>
</ul>
</li>
<li>A <a><code>DelayNode</code></a> interface, an
<a><code>AudioNode</code></a> which applies a dynamically adjustable variable delay. </li>
<li>A <a><code>PannerNode</code></a>
interface, for spatializing / positioning audio in 3D space. </li>
<li>An <a><code>AudioListener</code></a>
interface, which works with a <a>PannerNode</a> for
spatialization. </li>
<li>A <a><code>StereoPannerNode</code></a>
interface, for equal-power positioning of audio input in a stereo stream. </li>
<li>A <a><code>ConvolverNode</code></a>
interface, an <a><code>AudioNode</code></a> for applying a <a
href="#Convolution">real-time linear effect</a> (such as the sound
of a concert hall). </li>
<li>An <a><code>AnalyserNode</code></a> interface,
for use with music visualizers, or other visualization applications. </li>
<li>A <a><code>ChannelSplitterNode</code></a> interface,
for accessing the individual channels of an audio stream in the routing
graph. </li>
<li>A <a><code>ChannelMergerNode</code></a> interface, for
combining channels from multiple audio streams into a single audio stream.
</li>
<li>A <a><code>DynamicsCompressorNode</code></a> interface, an
<a><code>AudioNode</code></a> for dynamics compression. </li>
<li>A <a><code>WaveShaperNode</code></a>
interface, an <a><code>AudioNode</code></a> which applies a non-linear waveshaping effect for
distortion and other more subtle warming effects. </li>
<li>An <a><code>OscillatorNode</code></a>
interface, an audio source generating a periodic waveform. </li>
</ul>
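<p>As a rough sketch of the mixing point made for <a><code>GainNode</code></a> above,
the following example sums two sources through per-source gains into the destination.
The <code>voiceBuffer</code> and <code>musicBuffer</code> variables are placeholders
assumed to hold already-decoded <a><code>AudioBuffer</code></a>s.</p>
<pre class="highlight example">
// Informative sketch: a small two-input mixer built from GainNodes.
// Inputs act as unity-gain summing junctions, so connecting both gains
// to the destination mixes the two signals together.
var context = new AudioContext();

var voice = context.createBufferSource();
voice.buffer = voiceBuffer;   // assumed decoded AudioBuffer
var music = context.createBufferSource();
music.buffer = musicBuffer;   // assumed decoded AudioBuffer

var voiceGain = context.createGain();
var musicGain = context.createGain();
voiceGain.gain.value = 1.0;
musicGain.gain.value = 0.25;  // duck the music under the voice

voice.connect(voiceGain);
music.connect(musicGain);
voiceGain.connect(context.destination);
musicGain.connect(context.destination);

voice.start(0);
music.start(0);
</pre>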
</section>
</section>
<section id="conformance">
<p>The following conformance classes are defined by this specification: </p>
<dl>
<dt><dfn id="dfn-conforming-implementation">conforming
implementation</dfn></dt>
<dd><p>A user agent is considered to be a <a class="dfnref"
href="#dfn-conforming-implementation">conforming implementation</a> if it
satisfies all of the MUST-, REQUIRED- and SHALL-level criteria in this specification that
apply to implementations. </p>
</dd>
</dl>
<p>
User agents that use ECMAScript to implement the APIs defined in this
specification must implement them in a manner consistent with the ECMAScript
Bindings defined in the Web IDL specification [[!WEBIDL]] as this
specification uses that specification and terminology.
</p>
</section>
<section id=audioapi>
<h2 id="API">The Audio API</h2>
<section>
<h2 id="AudioContext">The AudioContext Interface</h2>
<p>This interface represents a set of <a><code>AudioNode</code></a> objects and their
connections. It allows for arbitrary routing of signals to the
<a><code>AudioDestinationNode</code></a>
(what the user ultimately hears). Nodes are created from the context and are
then <a href="#ModularRouting">connected</a> together. In most use
cases, only a single <a><code>AudioContext</code></a> is used per document.</p>
<dl title="enum AudioContextState" class="idl">
<dt>suspended</dt>
<dd>The AudioContext is currently suspended (context time is not proceeding, audio hardware may be powered down/released).</dd>
<dt>running</dt>
<dd>Audio is being processed.</dd>
<dt>closed</dt>
<dd>The AudioContext has been released, and can no longer be used to process audio. All system audio resources have been released. Attempts to create new Nodes on the AudioContext will throw InvalidStateError. (AudioBuffers may still be created, through <a>createBuffer</a> or <a>decodeAudioData</a>.)</dd>
</dl>
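<p>The following informative sketch shows how the states described above might be
observed and driven from script, using the <code>onstatechange</code>,
<code>suspend()</code> and <code>resume()</code> members defined below.</p>
<pre class="highlight example">
// Informative sketch: observing and driving AudioContext state transitions.
var context = new AudioContext();

context.onstatechange = function () {
  // context.state is "suspended", "running" or "closed".
  console.log("AudioContext state is now: " + context.state);
};

// When the application knows audio is not needed for a while:
function pauseAudio() {
  context.suspend().then(function () {
    console.log("Audio hardware released; context time is frozen.");
  });
}

// When audio is needed again:
function resumeAudio() {
  context.resume().then(function () {
    console.log("Streaming to the destination has resumed.");
  });
}
</pre>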
<dl title="[Constructor] interface AudioContext : EventTarget" class="idl"
data-merge="DecodeSuccessCallback DecodeErrorCallback">
<dt>readonly attribute AudioDestinationNode destination</dt>
<dd><p>An <a
href="#AudioDestinationNode"><code>AudioDestinationNode</code></a>
with a single input representing the final destination for all audio.
Usually this will represent the actual audio hardware.
All <a><code>AudioNode</code></a>s actively rendering
audio will directly or indirectly connect to <a
href="#widl-AudioContext-destination"><code>destination</code></a>.</p>
</dd>
<dt> readonly attribute float sampleRate</dt>
<dd><p>The sample rate (in sample-frames per second) at which the
<a><code>AudioContext</code></a> handles audio. It is assumed that all <a><code>AudioNode</code></a>s in the
context run at this rate. In making this assumption, sample-rate
converters or "varispeed" processors are not supported in real-time
processing.</p>
</dd>
<dt> readonly attribute double currentTime</dt>
<dd><p>This is a time in seconds which starts at zero when the context is
created and increases in real-time. All scheduled times are relative to
it. This is not a "transport" time which can be started, paused, and
re-positioned. It is always moving forward. A GarageBand-like timeline
transport system can be very easily built on top of this (in JavaScript).
This time corresponds to an ever-increasing hardware timestamp. </p>
</dd>
<dt> readonly attribute AudioListener listener </dt>
<dd><p>An <a href="#AudioListener"><code>AudioListener</code></a>
which is used for 3D <a
href="#Spatialization">spatialization</a>.</p>
</dd>
<dt>readonly attribute AudioContextState state</dt>
<dd>Describes the current state of the AudioContext. The context state MUST begin in "suspended", and transitions to "running" when system resources are acquired and audio has begun processing. For OfflineAudioContexts, the state will remain in "suspended" until <code>startRendering()</code> is called, at which point it will transition to "running", and then to "closed" once audio processing has completed and oncomplete has been fired.
<p>When the state is "suspended", a call to <code>resume()</code> will cause a transition to "running", or a call to <code>close()</code> will cause a transition to "closed".</p>
<p>When the state is "running", a call to <code>suspend()</code> will cause a transition to "suspended", or a call to <code>close()</code> will cause a transition to "closed".</p>
<p>When the state is "closed", no further state transitions are possible.</p>
</dd>
<dt>Promise suspend()</dt>
<dd>Suspends the progression of time in the audio context, allows any current context processing blocks that are already processed to be played to the destination, and then allows the system to release its claim on audio hardware. This is generally useful when the application knows it will not need the AudioContext for some time, and wishes to let the audio hardware power down. The promise resolves when the frame buffer is empty (has been handed off to the hardware), or immediately (with no other effect) if the context is already suspended. The promise is rejected if the context has been closed. This method will cause an INVALID_STATE_ERR exception to be thrown if called on an OfflineAudioContext.
<p>While the system is suspended, MediaStreams will have their output ignored; that is, data will be lost by the real time nature of media streams. HTMLMediaElements will similarly have their output ignored until the system is resumed. Audio Workers and ScriptProcessorNodes will simply not fire their onaudioprocess events while suspended, but will resume when resumed. For the purpose of AnalyserNode window functions, the data is considered as a continuous stream - i.e. the resume()/suspend() does not cause silence to appear in the AnalyserNode's stream of data.</p></dd>
<dt>Promise resume()</dt>
<dd>Resumes the progression of time in an audio context that has been suspended, which may involve re-priming the frame buffer contents. The promise resolves when the system has re-acquired (if necessary) access to audio hardware and has begun streaming to the destination, or immediately (with no other effect) if the context is already running. The promise is rejected if the context has been closed. If the context is not currently suspended, the promise will resolve. This method will cause an INVALID_STATE_ERR exception to be thrown if called on an OfflineAudioContext.</dd>
<dt>Promise close()</dt>
<dd>Closes the audio context, releasing any system audio resources used by the <a>AudioContext</a>. This will not automatically release all AudioContext-created objects, unless other references have been released as well; however, it will forcibly release any system audio resources that might prevent additional AudioContexts from being created and used, suspend the progression of audio time in the audio context, and stop processing audio data. The promise resolves when all AudioContext-creation-blocking resources have been released. This method will cause an INVALID_STATE_ERR exception to be thrown if called on an OfflineAudioContext.</dd>
<dt>attribute EventHandler onstatechange</dt>
<dd>
A property used to set the <code>EventHandler</code> for an event that is dispatched to
<a><code>AudioContext</code></a> when the state of the AudioContext has changed (i.e. when the
corresponding promise would have resolved). An event of type <a><code>Event</code></a> will be
dispatched to the event handler, which can query the AudioContext's state directly. A
newly-created AudioContext will always begin in the "suspended" state, and a state change event will be fired
whenever the state changes to a different state.
</dd>
<dt>AudioBuffer createBuffer()</dt>
<dd> Creates an AudioBuffer of the given size. The audio data in the buffer will
be zero-initialized (silent). A NotSupportedError exception MUST be thrown
if any of the arguments is negative, zero, or outside its nominal range.
<dl class="parameters">
<dt>unsigned long numberOfChannels</dt>
<dd> Determines how many channels the buffer will have. An implementation
must support at least 32 channels.</dd>
<dt>unsigned long length</dt>
<dd>Determines the size of the buffer in sample-frames.</dd>
<dt>float sampleRate</dt>
<dd>Describes the sample-rate of the linear PCM audio data in the buffer in
sample-frames per second. An implementation must support sample-rates in
at least the range 22050 to 96000.</dd>
</dl>
</dd>
<dt> Promise&lt;AudioBuffer&gt; decodeAudioData() </dt>
<dd>
Asynchronously decodes the audio file data contained in the ArrayBuffer.
The ArrayBuffer can, for example, be loaded from an XMLHttpRequest's
<code>response</code> attribute after setting the <code>responseType</code>
to "arraybuffer". Audio file data can be in any of the formats supported by
the <code>audio</code> element.
<dl class=parameters>
<dt> ArrayBuffer audioData </dt>
<dd> An ArrayBuffer containing compressed audio data </dd>
<dt> optional DecodeSuccessCallback successCallback </dt>
<dd>
A callback function which will be invoked when the decoding is finished.
The single argument to this callback is an AudioBuffer representing the
decoded PCM audio data.
</dd>
<dt> optional DecodeErrorCallback errorCallback </dt>
<dd>
A callback function which will be invoked if there is an error
decoding the audio file.
</dd>
</dl>
<p>
Although the primary method of interfacing with this function is via its promise return value, the callback
parameters are provided for legacy reasons (an informative usage sketch follows the interface description below).
</p>
<p>
The following steps must be performed:
</p>
<ol>
<li>Let <var>promise</var> be a new promise.</li>
<li>If <a>audioData</a> is null or not a valid ArrayBuffer:
<ol>
<li>Let <var>error</var> be a <code>DOMException</code> whose name is <code>NotSupportedError</code>.</li>
<li>Reject <var>promise</var> with <var>error</var>.</li>
<li>If <dfn>errorCallback</dfn> is not missing, invoke <dfn>errorCallback</dfn> with <var>error</var>.</li>
<li>Terminate this algorithm.
</ol>
<li> Neuter the <a>audioData</a> ArrayBuffer in such a way that JavaScript code may not
access or modify the data anymore.</li>
<li>Queue a decoding operation to be performed on another thread.</li>
<li>Return <var>promise</var>.</li>
<li>In the decoding thread:
<ol>
<li>Attempt to decode the encoded <a>audioData</a> into linear PCM.</li>
<li>If a decoding error is encountered due to the audio format not being recognized or supported, or because
of corrupted/unexpected/inconsistent data, then, on the main thread's event loop:
<ol>
<li>Let <var>error</var> be a <code>DOMException</code> whose name is <code>"EncodingError"</code>.</li>
<li>Reject <var>promise</var> with <var>error</var>.</li>
<li>If <dfn>errorCallback</dfn> is not missing, invoke <a>errorCallback</a> with <var>error</var>.</li>
</ol>
<li>Otherwise:
<ol>
<li>Take the result, representing the decoded linear PCM audio data, and resample it to the sample-rate of
the <a><code>AudioContext</code></a> if it is different from the sample-rate of <a>audioData</a>.</li>
<li>On the main thread's event loop:
<ol>
<li>Let <var>buffer</var> be an <code>AudioBuffer</code> containing the final result (after possibly
sample-rate converting).</li>
<li>Resolve <var>promise</var> with <var>buffer</var>.</li>
<li>If <dfn>successCallback</dfn> is not missing, invoke <a>successCallback</a> with <var>buffer</var>.
</li>
</ol>
</li>
</ol>
</li>
</ol>
</li>
</ol>
</dd>
<dt> AudioBufferSourceNode createBufferSource() </dt>
<dd>Creates an <a><code>AudioBufferSourceNode</code></a>.
</dd>
<dt>MediaElementAudioSourceNode createMediaElementSource()</dt>
<dd>
Creates a <a href="#MediaElementAudioSourceNode">MediaElementAudioSourceNode</a>
given an HTMLMediaElement. As a consequence of calling this method, audio
playback from the HTMLMediaElement will be re-routed into the processing graph
of the <a><code>AudioContext</code></a>.
<dl class=parameters>
<dt> HTMLMediaElement mediaElement </dt>
<dd> The media element that will be re-routed. </dd>
</dl>
</dd>
<dt> MediaStreamAudioSourceNode createMediaStreamSource() </dt>
<dd>Creates a <a><code>MediaStreamAudioSourceNode</code></a>.
<dl class=parameters>
<dt>MediaStream mediaStream</dt>
<dd> The media stream that will act as source. </dd>
</dl>
</dd>
<dt> MediaStreamAudioDestinationNode createMediaStreamDestination() </dt>
<dd>Creates a <a><code>MediaStreamAudioDestinationNode</code></a> </dd>
<dt>AudioWorker createAudioWorker()</dt>
<dd>
The <dfn id="createAudioWorker-context"><code>createAudioWorker</code></dfn> method creates an <a><code>AudioWorker</code></a> and its associated <a><code>AudioWorkerGlobalScope</code></a>
for direct audio processing using JavaScript. An <a><code>AudioWorker</code></a> acts as a factory for <a><code>AudioWorkerNode</code></a> instances,
allowing the nodes to be created synchronously on demand. Because script loading must be performed asynchronously, newly created nodes may not be able to
process audio immediately on creation. However, once the <a>AudioWorker</a>'s script has been loaded and run, which can be detected via an event callback,
nodes can be created with the assurance of no script-induced delays.
<dl class=parameters>
<dt> DOMString scriptURL </dt>
<dd>
This parameter represents the URL of the script to be loaded as an <a>AudioWorker</a>.
</dd>
</dl>
</dd>
<dt> ScriptProcessorNode createScriptProcessor()</dt>
<dd>
This method is DEPRECATED, as it is intended to be replaced by createAudioWorker.
Creates a <a><code>ScriptProcessorNode</code></a> for direct audio processing using
JavaScript. An IndexSizeError exception MUST be thrown if
<a><code>bufferSize</code></a> or <a><code>numberOfInputChannels</code></a> or
<a><code>numberOfOutputChannels</code></a> are outside the valid range.
<dl class=parameters>
<dt> optional unsigned long bufferSize = 0 </dt>
<dd>
The <a><code>bufferSize</code></a> parameter determines the buffer size in units of
sample-frames. If it's not passed in, or if the value is 0, then the
implementation will choose the best buffer size for the given
environment, which will be a constant power of 2 throughout the lifetime
of the node. Otherwise, if the author explicitly specifies the
bufferSize, it must be one of the following values: 256, 512, 1024,
2048, 4096, 8192, 16384. This value controls how frequently the
<a href="#audioprocess-spnode"><code>audioprocess</code></a> event is
dispatched and how many sample-frames need to be processed each call.
Lower values for <a><code>bufferSize</code></a>
will result in a lower (better) <a href="#latency">latency</a>. Higher
values will be necessary to avoid audio breakup and
<a href="#audio-glitching">glitches</a>.
It is recommended for authors to not specify this buffer size and
allow the implementation to pick a good buffer size to balance between
<a href="#latency">latency</a> and audio quality.
</dd>
<dt> optional unsigned long numberOfInputChannels = 2 </dt>
<dd>
This parameter determines the number of channels for this node's
input. Values of up to 32 must be supported.
</dd>
<dt> optional unsigned long numberOfOutputChannels = 2 </dt>
<dd>
This parameter determines the number of channels for this node's
output. Values of up to 32 must be supported.
</dd>
</dl>
It is invalid for both <a><code>numberOfInputChannels</code></a> and
<a><code>numberOfOutputChannels</code></a> to be zero.
</dd>
<dt> AnalyserNode createAnalyser() </dt>
<dd>Creates an <a><code>AnalyserNode</code></a>.</dd>
<dt> GainNode createGain() </dt>
<dd>Creates a <a><code>GainNode</code></a>.</dd>
<dt> DelayNode createDelay() </dt>
<dd>
Creates a <a><code>DelayNode</code></a> representing a variable delay line. The
initial default delay time will be 0 seconds.
<dl class=parameters>
<dt> optional double maxDelayTime = 1.0 </dt>
<dd>
The <dfn>maxDelayTime</dfn> parameter is optional and specifies the
maximum delay time in seconds allowed for the delay line. If
specified, this value MUST be greater than zero and less than three
minutes or a NotSupportedError exception MUST be thrown.
</dd>
</dl>
</dd>
<dt> BiquadFilterNode createBiquadFilter() </dt>
<dd>Creates a <a><code>BiquadFilterNode</code></a>
representing a second order filter which can be configured as one of
several common filter types.
</dd>
<dt> WaveShaperNode createWaveShaper() </dt>
<dd>
Creates a <a><code>WaveShaperNode</code></a> representing a non-linear distortion.
</dd>
<dt> PannerNode createPanner() </dt>
<dd>
Creates a <a><code>PannerNode</code></a>.
</dd>
<dt> StereoPannerNode createStereoPanner() </dt>
<dd>
Creates a <a><code>StereoPannerNode</code></a>.
</dd>
<dt> ConvolverNode createConvolver() </dt>
<dd>
Creates a <a><code>ConvolverNode</code></a>.
</dd>
<dt> ChannelSplitterNode createChannelSplitter() </dt>
<dd>
Creates a <a><code>ChannelSplitterNode</code></a>
representing a channel splitter. An IndexSizeError exception MUST be thrown
for invalid parameter values.
<dl class=parameters>
<dt> optional unsigned long numberOfOutputs = 6 </dt>
<dd>
The number of outputs. Values of up to 32 must be supported. If not
specified, then 6 will be used.
</dd>
</dl>
</dd>
<dt> ChannelMergerNode createChannelMerger() </dt>
<dd>
Creates a <a><code>ChannelMergerNode</code></a> representing a channel merger. An
IndexSizeError exception MUST be thrown for invalid parameter values.
<dl class=parameters>
<dt> optional unsigned long numberOfInputs = 6 </dt>
<dd>
The <dfn>numberOfInputs</dfn> parameter determines the number of inputs.
Values of up to 32 must be supported. If not specified, then 6 will be
used.
</dd>
</dl>
</dd>
<dt> DynamicsCompressorNode createDynamicsCompressor() </dt>
<dd>Creates a <a><code>DynamicsCompressorNode</code></a>.</dd>
<dt> OscillatorNode createOscillator() </dt>
<dd>Creates an <a><code>OscillatorNode</code></a>.</dd>
<dt> PeriodicWave createPeriodicWave() </dt>
<dd>
Creates a <a><code>PeriodicWave</code></a> representing a waveform
containing arbitrary harmonic content. The <code>real</code> and
<code>imag</code> parameters must be of type <code>Float32Array</code>
(described in [[!TYPED-ARRAYS]]) of equal lengths greater than zero and less
than or equal to 4096 or an IndexSizeError exception MUST be thrown. These
parameters specify the Fourier coefficients of a
<a href="http://en.wikipedia.org/wiki/Fourier_series">Fourier series</a>
representing the partials of a periodic waveform. The created
<a><code>PeriodicWave</code></a> will be used with an
<a><code>OscillatorNode</code></a> and will represent a <em>normalized</em>
time-domain waveform having maximum absolute peak value of 1. Another way of
saying this is that the generated waveform of an
<a><code>OscillatorNode</code></a> will have maximum peak value at 0dBFS.
Conveniently, this corresponds to the full-range of the signal values used by
the Web Audio API. Because the PeriodicWave will be normalized on creation,
the <code>real</code> and <code>imag</code> parameters represent
<em>relative</em> values.
<p>
As <a>PeriodicWave</a> objects maintain their own representation, any
modification of the arrays used as the <code>real</code> and
<code>imag</code> parameters after the call to
<a href="#widl-AudioContext-createPeriodicWave-PeriodicWave-Float32Array-real-Float32Array-imag">
<code>createPeriodicWave()</code></a> will have no effect on the
<a>PeriodicWave</a> object.
</p>
<dl class=parameters>
<dt> Float32Array real </dt>
<dd>
The <dfn id="dfn-real">real</dfn> parameter represents an array of
<code>cosine</code> terms (traditionally the A terms). In audio
terminology, the first element (index 0) is the DC-offset of the periodic
waveform and is usually set to zero. The second element (index 1)
represents the fundamental frequency. The third element represents the
first overtone, and so on.
</dd>
<dt>Float32Array imag</dt>
<dd>
The <dfn id="dfn-imag">imag</dfn> parameter represents an array of
<code>sine</code> terms (traditionally the B terms). The first element
(index 0) should be set to zero (and will be ignored) since this term does
not exist in the Fourier series. The second element (index 1) represents
the fundamental frequency. The third element represents the first
overtone, and so on.
</dd>
</dl>
</dd>
</dl>
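<p>The following informative sketch illustrates <code>createPeriodicWave()</code> as
described above, building an approximation of a square wave from its odd sine
harmonics and playing it with an <a><code>OscillatorNode</code></a> via its
<code>setPeriodicWave()</code> method. The number of harmonics used here (16) is an
arbitrary choice for illustration.</p>
<pre class="highlight example">
// Informative sketch: a band-limited, square-like waveform from sine terms.
// A square wave contains only odd harmonics, with amplitude 1/n.
var context = new AudioContext();
var numHarmonics = 16;                      // arbitrary, for illustration
var real = new Float32Array(numHarmonics);  // cosine (A) terms, left at zero
var imag = new Float32Array(numHarmonics);  // sine (B) terms

for (var n = 1; n &lt; numHarmonics; n++) {
  imag[n] = (n % 2 == 1) ? 1 / n : 0;       // index 0 (DC) is ignored
}

var wave = context.createPeriodicWave(real, imag);
var osc = context.createOscillator();
osc.setPeriodicWave(wave);
osc.frequency.value = 220;
osc.connect(context.destination);
osc.start(0);
</pre>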
<dl title="callback DecodeSuccessCallback = void" class="idl">
<dt> AudioBuffer decodedData </dt>
<dd> The AudioBuffer containing the decoded audio data. </dd>
</dl>
<dl title="callback DecodeErrorCallback = void" class="idl">
<dt> DOMException error </dt>
<dd> The error that occurred while decoding. </dd>
</dl>
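<p>As an informative illustration of the <code>decodeAudioData()</code> algorithm above,
the following sketch loads a compressed audio file with XMLHttpRequest (setting
<code>responseType</code> to <code>"arraybuffer"</code>) and decodes it using the
promise form of the method. The URL <code>"sounds/bark.ogg"</code> is a placeholder
for this example.</p>
<pre class="highlight example">
// Informative sketch: fetching and decoding an audio asset.
var context = new AudioContext();
var dogBarkingBuffer = null;

var request = new XMLHttpRequest();
request.open("GET", "sounds/bark.ogg", true); // placeholder URL
request.responseType = "arraybuffer";
request.onload = function () {
  context.decodeAudioData(request.response).then(function (decodedData) {
    // Ready to be assigned to an AudioBufferSourceNode's buffer attribute.
    dogBarkingBuffer = decodedData;
  }, function (error) {
    console.log("Unable to decode audio data: " + error.name);
  });
};
request.send();
</pre>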
<section>
<h3 id="lifetime-AudioContext" class="informative">Lifetime</h3>
<p>
Once created, an <code>AudioContext</code> will continue to play sound until it has no more sound to play, or
the page goes away.
</p>
</section>
<section class=informative>
<h3>Lack of introspection or serialization primitives</h3>
<p>
The Web Audio API takes a <em>fire-and-forget</em> approach to audio source
scheduling. That is, <a>source nodes</a> are created for each note during the
lifetime of the <a>AudioContext</a>, and never explicitly removed from the
graph. This is incompatible with a serialization API, since there is no stable
set of nodes that could be serialized.
</p>
<p>
Moreover, having an introspection API would allow content script to observe
garbage collections.
</p>
</section>
</section>
<section>
<h2 id="OfflineAudioContext">The OfflineAudioContext Interface</h2>
<p><code>OfflineAudioContext</code> is a particular type of <a><code>AudioContext</code></a> for rendering/mixing-down
(potentially) faster than real-time. It does not render to the audio hardware, but instead renders as quickly as
possible, fulfilling the returned promise with the rendered result as an <code>AudioBuffer</code>.</p>
<p>Each <code>OfflineAudioContext</code> instance has an associated
<dfn id="#offline-audio-context-rendering-started-flag">rendering started flag</dfn> that is initially
<code>false</code>.</p>
<dl title='[Constructor(unsigned long numberOfChannels, unsigned long length, float sampleRate)] interface OfflineAudioContext : AudioContext' class='idl'>
<dt>Promise&lt;AudioBuffer&gt; startRendering()</dt>
<dd>
<p>Given the current connections and scheduled changes, starts rendering audio.</p>
<p>Although the primary method of getting the rendered audio data is via its promise return value, the instance
will also fire an event named <code>complete</code> for legacy reasons.</p>
<p>The following steps must be performed:</p>
<ol>
<li>If this instance's <a href="#offline-audio-context-rendering-started-flag">rendering started flag</a> is
<code>true</code>, return a promise rejected with a <code>DOMException</code> whose name is
<code>"InvalidStateError"</code>.</li>
<li>Set this instance's <a href="#offline-audio-context-rendering-started-flag">rendering started flag</a> to
<code>true</code>.</li>
<li>Let <var>promise</var> be a new promise.</li>
<li>Asynchronously perform the following steps:
<ol>
<li>Let <var>buffer</var> be a new <code>AudioBuffer</code>, with a
number of channels, length and sample rate equal respectively to the
<code>numberOfChannels</code>, <code>length</code> and
<code>sampleRate</code> parameters used when this instance's constructor was called.</li>
<li>Given the current connections and scheduled changes, start
rendering <code>length</code> sample-frames of audio into <var>buffer</var>.</li>
<li>Once the rendering is complete,
<ol>
<li>Resolve <var>promise</var> with <var>buffer</var>.</li>
<li>Fire a simple event named <code>complete</code> at this instance, using an instance of
<a><code>OfflineAudioCompletionEvent</code></a> whose
<code>renderedBuffer</code> property is set to <var>buffer</var>.</li>
</ol>
</li>
</ol>
</li>
<li>Return <var>promise</var>.</li>
</ol>
</dd>
<dt>attribute EventHandler oncomplete</dt>
<dd><p>An EventHandler of type <a href="#OfflineAudioCompletionEvent">OfflineAudioCompletionEvent</a>.</p></dd>
</dl>
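<p>The following informative sketch renders one second of a 440 Hz tone faster than
real time using the promise returned by <code>startRendering()</code>. The channel
count, length and sample rate passed to the constructor are arbitrary choices for
this example.</p>
<pre class="highlight example">
// Informative sketch: render 1 second of stereo audio at 44100 Hz offline.
var offline = new OfflineAudioContext(2, 44100, 44100);

var osc = offline.createOscillator();
osc.frequency.value = 440;
osc.connect(offline.destination);
osc.start(0);

offline.startRendering().then(function (renderedBuffer) {
  // renderedBuffer is an AudioBuffer holding the fully rendered result.
  console.log("Rendered " + renderedBuffer.length + " sample-frames.");
});
</pre>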
<section>
<h2 id="OfflineAudioCompletionEvent">The OfflineAudioCompletionEvent Interface</h2>
<p>This is an <code>Event</code> object which is dispatched to <a><code>OfflineAudioContext</code></a> for legacy
reasons.</p>
<dl title="interface OfflineAudioCompletionEvent : Event" class="idl">
<dt>readonly attribute AudioBuffer renderedBuffer</dt>
<dd><p>An <code>AudioBuffer</code> containing the rendered audio data.</p></dd>
</dl>
</section>
</section>
<section>
<h2>The <dfn>AudioNode</dfn> Interface</h2>
<p>AudioNodes are the building blocks of an <a
href="#AudioContext"><code>AudioContext</code></a>. This interface
represents audio sources, the audio destination, and intermediate processing
modules. These modules can be connected together to form <a
href="#ModularRouting">processing graphs</a> for rendering audio to the
audio hardware. Each node can have <a>inputs</a> and/or <a>outputs</a>.
A <a>source node</a> has no inputs
and a single output. An <a
href="#AudioDestinationNode"><code>AudioDestinationNode</code></a> has
one input and no outputs and represents the final destination to the audio
hardware. Most processing nodes such as filters will have one input and one
output. Each type of <a><code>AudioNode</code></a> differs in the details of how it processes or synthesizes audio. But, in general, an <a><code>AudioNode</code></a>
will process its inputs (if it has any), and generate audio for its outputs (if it has any).
</p>
<p>
Each output has one or more channels. The exact number of
channels depends on the details of the specific <a><code>AudioNode</code></a>.
</p>
<p>
An output may connect to one or more <a><code>AudioNode</code></a>
inputs, thus <em>fan-out</em> is supported. An input initially has no connections,
but may be connected from one
or more <a>AudioNode</a> outputs, thus <em>fan-in</em> is supported. When the <code>connect()</code> method is called to connect
an output of an <a>AudioNode</a> to an input of an
<a>AudioNode</a>, we call that a <dfn>connection</dfn> to the input.
</p>
<p>
Each <a>AudioNode</a> <dfn>input</dfn> has a specific number of
channels at any given time. This number can change depending on the <a>connection</a>(s)
made to the input. If the input has no connections then it has one channel which is silent.
</p>
<p>
For each <a>input</a>, an <a><code>AudioNode</code></a> performs a mixing
(usually an up-mixing) of all connections to that input.