From 33058f2b292b3a581333bdfb21b8f671898c5060 Mon Sep 17 00:00:00 2001 From: Peter Bengtsson Date: Tue, 8 Dec 2020 14:40:17 -0500 Subject: initial commit --- .../web/api/audiocontext/audiocontext/index.html | 99 +++++++++ .../web/api/audiocontext/baselatency/index.html | 54 +++++ files/zh-cn/web/api/audiocontext/close/index.html | 115 +++++++++++ .../web/api/audiocontext/createanalyser/index.html | 154 ++++++++++++++ .../api/audiocontext/createbiquadfilter/index.html | 139 +++++++++++++ .../web/api/audiocontext/createbuffer/index.html | 181 +++++++++++++++++ .../api/audiocontext/createbuffersource/index.html | 150 ++++++++++++++ .../audiocontext/createchannelmerger/index.html | 143 +++++++++++++ .../audiocontext/createchannelsplitter/index.html | 138 +++++++++++++ .../api/audiocontext/createconvolver/index.html | 131 ++++++++++++ .../web/api/audiocontext/createdelay/index.html | 213 ++++++++++++++++++++ .../createmediaelementsource/index.html | 167 +++++++++++++++ .../createmediastreamdestination/index.html | 161 +++++++++++++++ .../createmediastreamsource/index.html | 180 +++++++++++++++++ .../audiocontext/createscriptprocessor/index.html | 199 ++++++++++++++++++ .../api/audiocontext/createwaveshaper/index.html | 133 ++++++++++++ .../web/api/audiocontext/currenttime/index.html | 112 +++++++++++ .../api/audiocontext/decodeaudiodata/index.html | 223 +++++++++++++++++++++ .../web/api/audiocontext/destination/index.html | 114 +++++++++++ files/zh-cn/web/api/audiocontext/index.html | 107 ++++++++++ .../zh-cn/web/api/audiocontext/listener/index.html | 112 +++++++++++ .../audiocontext/mozaudiochanneltype/index.html | 95 +++++++++ .../web/api/audiocontext/onstatechange/index.html | 101 ++++++++++ files/zh-cn/web/api/audiocontext/resume/index.html | 119 +++++++++++ .../web/api/audiocontext/samplerate/index.html | 112 +++++++++++ files/zh-cn/web/api/audiocontext/state/index.html | 111 ++++++++++ .../zh-cn/web/api/audiocontext/suspend/index.html | 115 +++++++++++ 27 files changed, 3678 insertions(+) create mode 100644 files/zh-cn/web/api/audiocontext/audiocontext/index.html create mode 100644 files/zh-cn/web/api/audiocontext/baselatency/index.html create mode 100644 files/zh-cn/web/api/audiocontext/close/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createanalyser/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createbuffer/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createbuffersource/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createchannelmerger/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createconvolver/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createdelay/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createmediaelementsource/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createmediastreamdestination/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createmediastreamsource/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html create mode 100644 files/zh-cn/web/api/audiocontext/createwaveshaper/index.html create mode 100644 files/zh-cn/web/api/audiocontext/currenttime/index.html create mode 100644 files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html create mode 100644 files/zh-cn/web/api/audiocontext/destination/index.html create 
mode 100644 files/zh-cn/web/api/audiocontext/index.html create mode 100644 files/zh-cn/web/api/audiocontext/listener/index.html create mode 100644 files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html create mode 100644 files/zh-cn/web/api/audiocontext/onstatechange/index.html create mode 100644 files/zh-cn/web/api/audiocontext/resume/index.html create mode 100644 files/zh-cn/web/api/audiocontext/samplerate/index.html create mode 100644 files/zh-cn/web/api/audiocontext/state/index.html create mode 100644 files/zh-cn/web/api/audiocontext/suspend/index.html (limited to 'files/zh-cn/web/api/audiocontext') diff --git a/files/zh-cn/web/api/audiocontext/audiocontext/index.html b/files/zh-cn/web/api/audiocontext/audiocontext/index.html new file mode 100644 index 0000000000..a7f5a96d1f --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/audiocontext/index.html @@ -0,0 +1,99 @@ +--- +title: AudioContext() +slug: Web/API/AudioContext/AudioContext +tags: + - 媒体 + - 音频 +translation_of: Web/API/AudioContext/AudioContext +--- +

{{APIRef("Web Audio API")}}{{SeeCompatTable}}

+ +

The AudioContext() constructor creates a new {{domxref("AudioContext")}} object, which represents an audio-processing graph built from audio modules linked together, each represented by an {{domxref("AudioNode")}}.

+ +

Syntax

+ +
var audioContext = new AudioContext(options)
+ +

Parameters

+ +
+
options {{optional_inline}}
+
The options are as follows: +
    +
  • latencyHint: The type of playback the context will be used for, as a trade-off between audio output latency and resource consumption. Accepted values are "balanced" (balance audio output latency against resource consumption), "interactive" (the default — the smallest possible output latency, ideally without glitching), and "playback" (prioritize uninterrupted playback over output latency). A double can be passed instead, specifying a preferred latency in seconds, for more precise control (see the sketch below).
+
+
+
+ +
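As a rough sketch of the options argument (the 0.2-second figure here is arbitrary, and the browser treats the hint as advisory, so it may ignore it):

var interactiveCtx = new AudioContext({ latencyHint: 'interactive' });
// Prefer the lowest glitch-free output latency (the default)

var customCtx = new AudioContext({ latencyHint: 0.2 });
// Or ask for roughly 0.2 seconds of latency to save resources

console.log(interactiveCtx.baseLatency, customCtx.baseLatency);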

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API','#AudioContext','AudioContext()')}}{{Spec2('Web Audio API')}}Initial definition.
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(55.0)}}{{CompatUnknown}}{{CompatUnknown}}{{CompatOpera(42)}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidAndroid WebviewFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatNo}}{{CompatChrome(55.0)}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatOperaMobile(42)}}{{CompatUnknown}}{{CompatChrome(55.0)}}
+
diff --git a/files/zh-cn/web/api/audiocontext/baselatency/index.html b/files/zh-cn/web/api/audiocontext/baselatency/index.html new file mode 100644 index 0000000000..219fc42429 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/baselatency/index.html @@ -0,0 +1,54 @@ +--- +title: AudioContext.baseLatency +slug: Web/API/AudioContext/baseLatency +translation_of: Web/API/AudioContext/baseLatency +--- +

{{SeeCompatTable}}{{APIRef("Web Audio API")}}

+ +

The baseLatency read-only property of the {{domxref("AudioContext")}} interface returns a double that represents the number of seconds of processing latency incurred by the AudioContext passing the audio from the {{domxref("AudioDestinationNode")}} to the audio subsystem.

+ +

You can request a certain latency during {{domxref("AudioContext.AudioContext()", "construction time", "", "true")}} with the latencyHint option but the browser may ignore the option.

+ +

Syntax

+ +
var baseLatency = audioCtx.baseLatency;
+ +

Value

+ +

A double representing the base latency in seconds.

+ +

Example

+ +
//default latency ("interactive")
+const audioCtx1 = new AudioContext();
+console.log(audioCtx1.baseLatency);//0.01
+
+//higher latency ("playback")
+const audioCtx2 = new AudioContext({ latencyHint: 'playback' });
+console.log(audioCtx2.baseLatency);//0.02
+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API','#dom-audiocontext-baselatency','baseLatency')}}{{Spec2('Web Audio API')}}Initial definition.
+ +

Browser Compatibility

+ +
+ + +

{{Compat("api.AudioContext.baseLatency")}}

+
diff --git a/files/zh-cn/web/api/audiocontext/close/index.html b/files/zh-cn/web/api/audiocontext/close/index.html new file mode 100644 index 0000000000..68a2198776 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/close/index.html @@ -0,0 +1,115 @@ +--- +title: AudioContext.close() +slug: Web/API/AudioContext/close +translation_of: Web/API/AudioContext/close +--- +

{{ APIRef("Web Audio API") }}

+ +

The close() method of the {{ domxref("AudioContext") }} interface closes the audio context, releasing any system audio resources that it is using.

+ +

A closed context cannot be used to create new nodes, but it can still decode audio data, create buffers, and so on.

+ +

This function does not automatically release every object created with the AudioContext, unless other references have been released as well; however, it forcibly releases any system audio resources that might prevent additional AudioContexts from being created or used. It suspends the progression of audio time in the context and stops the processing of audio data. The returned {{jsxref("Promise")}} resolves only after all AudioContext-creation-blocking resources have been released. Calling this method on an {{domxref("OfflineAudioContext")}} throws an INVALID_STATE_ERR exception.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+audioCtx.close().then(function() { ... });
+
+ +

Return value

+ +

A {{jsxref("Promise")}} that resolves with void.

+ +

Example

+ +

The following snippet is taken from our AudioContext states demo (see it running live): clicking the stop button calls close(); once the promise resolves, the demo resets to its beginning state.

+ +
stopBtn.onclick = function() {
+  audioCtx.close().then(function() {
+    startBtn.removeAttribute('disabled');
+    susresBtn.setAttribute('disabled','disabled');
+    stopBtn.setAttribute('disabled','disabled');
+  });
+}
+
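Since close() returns a promise, the same handler can be written with async/await — a minimal sketch (the button variables are the ones from the demo above):

stopBtn.onclick = async function() {
  await audioCtx.close();
  console.log(audioCtx.state); // "closed"
  startBtn.removeAttribute('disabled');
};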
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-close-Promise-void', 'close()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(43.0)}}{{CompatGeckoDesktop(40.0)}}{{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidAndroid WebviewFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatNo}}{{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatNo}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createanalyser/index.html b/files/zh-cn/web/api/audiocontext/createanalyser/index.html new file mode 100644 index 0000000000..2d00a8a100 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createanalyser/index.html @@ -0,0 +1,154 @@ +--- +title: AudioContext.createAnalyser() +slug: Web/API/AudioContext/createAnalyser +translation_of: Web/API/BaseAudioContext/createAnalyser +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createAnalyser() method of the {{ domxref("AudioContext") }} interface creates an {{ domxref("AnalyserNode") }}, which can be used to expose audio time and frequency data, for example to create data visualizations.

+
+ +
+

Note: For more on using this node, see the {{domxref("AnalyserNode")}} page.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var analyser = audioCtx.createAnalyser();
+ +

Return value

+ +

An {{domxref("AnalyserNode")}} object.

+ +

Example

+ +

The following example shows basic usage of an AudioContext to create an analyser node, which is then used with requestAnimationFrame() to repeatedly collect time-domain data and draw an "oscilloscope style" rendering of the current audio input. For a fuller example/application, see our Voice-change-O-matic demo (see lines 128–205 of app.js).

+ +
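The excerpt below relies on some setup done elsewhere in the demo; as a minimal sketch of that assumed setup (canvasCtx, WIDTH, HEIGHT and drawVisual are our reconstruction, not part of this page's original code):

var canvas = document.querySelector('canvas');
var canvasCtx = canvas.getContext('2d');
var WIDTH = canvas.width;
var HEIGHT = canvas.height;
var drawVisual; // holds the requestAnimationFrame() ID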
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var analyser = audioCtx.createAnalyser();
+
+  ...
+
+analyser.fftSize = 2048;
+var bufferLength = analyser.fftSize;
+var dataArray = new Uint8Array(bufferLength);
+analyser.getByteTimeDomainData(dataArray);
+
+// draw an oscilloscope of the current audio source
+
+function draw() {
+
+      drawVisual = requestAnimationFrame(draw);
+
+      analyser.getByteTimeDomainData(dataArray);
+
+      canvasCtx.fillStyle = 'rgb(200, 200, 200)';
+      canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
+
+      canvasCtx.lineWidth = 2;
+      canvasCtx.strokeStyle = 'rgb(0, 0, 0)';
+
+      canvasCtx.beginPath();
+
+      var sliceWidth = WIDTH * 1.0 / bufferLength;
+      var x = 0;
+
+      for(var i = 0; i < bufferLength; i++) {
+
+        var v = dataArray[i] / 128.0;
+        var y = v * HEIGHT/2;
+
+        if(i === 0) {
+          canvasCtx.moveTo(x, y);
+        } else {
+          canvasCtx.lineTo(x, y);
+        }
+
+        x += sliceWidth;
+      }
+
+      canvasCtx.lineTo(canvas.width, canvas.height/2);
+      canvasCtx.stroke();
+    };
+
+    draw();
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createAnalyser-AnalyserNode', 'createAnalyser()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html b/files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html new file mode 100644 index 0000000000..fa5884ad71 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html @@ -0,0 +1,139 @@ +--- +title: AudioContext.createBiquadFilter() +slug: Web/API/AudioContext/createBiquadFilter +tags: + - API + - EQ + - Web Audio API + - 参考 + - 方法 + - 滤波器 +translation_of: Web/API/BaseAudioContext/createBiquadFilter +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createBiquadFilter() method of the {{ domxref("AudioContext") }} interface creates a {{ domxref("BiquadFilterNode") }}, a second-order filter that can be configured as one of several common filter types.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var biquadFilter = audioCtx.createBiquadFilter();
+ +

Returns

+ +

A {{domxref("BiquadFilterNode")}}.

+ +

Example

+ +

This example shows basic usage of an AudioContext to create a biquad filter node. For a complete working example, check out our Voice-change-O-matic demo (and view its source code).

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+
+//set up the different audio nodes we will use for the app
+var analyser = audioCtx.createAnalyser();
+var distortion = audioCtx.createWaveShaper();
+var gainNode = audioCtx.createGain();
+var biquadFilter = audioCtx.createBiquadFilter();
+var convolver = audioCtx.createConvolver();
+
+// connect the nodes together
+
+source = audioCtx.createMediaStreamSource(stream);
+source.connect(analyser);
+analyser.connect(distortion);
+distortion.connect(biquadFilter);
+biquadFilter.connect(convolver);
+convolver.connect(gainNode);
+gainNode.connect(audioCtx.destination);
+
+// Manipulate the Biquad filter
+
+biquadFilter.type = "lowshelf";
+biquadFilter.frequency.value = 1000;
+biquadFilter.gain.value = 25;
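
The lowshelf settings above come from the demo; other filter types are configured the same way. For instance, a band-pass sketch (these values are illustrative only, not from the demo):

biquadFilter.type = "bandpass";
biquadFilter.frequency.value = 440; // center frequency in Hz
biquadFilter.Q.value = 10; // higher Q = narrower band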
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createBiquadFilter-BiquadFilterNode', 'createBiquadFilter()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0 {{property_prefix("webkit")}}
+ 22
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

 

+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createbuffer/index.html b/files/zh-cn/web/api/audiocontext/createbuffer/index.html new file mode 100644 index 0000000000..2d29213737 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createbuffer/index.html @@ -0,0 +1,181 @@ +--- +title: AudioContext.createBuffer() +slug: Web/API/AudioContext/createBuffer +tags: + - 创建音频片段 + - 接口 + - 方法 + - 音频环境 +translation_of: Web/API/BaseAudioContext/createBuffer +--- +

The createBuffer() method of the {{ domxref("AudioContext") }} interface is used to create a new, empty {{ domxref("AudioBuffer") }} object, which can then be populated with data and played via an {{ domxref("AudioBufferSourceNode") }}.

+ +

For more details on audio buffers, see the {{ domxref("AudioBuffer") }} reference page.

+ +
+

Note: createBuffer() used to be able to take compressed audio data and return decoded samples, but this ability was removed because all the decoding was done on the main thread, so createBuffer() blocked other code execution. The asynchronous method decodeAudioData() does the same job — it takes compressed audio, such as an MP3 file, and directly returns an {{ domxref("AudioBuffer") }} that can be played via an {{ domxref("AudioBufferSourceNode") }}. For compressed formats such as MP3, you should therefore use decodeAudioData().

+
+ +

Syntax

+ +
var buffer = audioCtx.createBuffer(numOfChannels, length, sampleRate);
+ +

Parameters

+ +
+

Note: For an in-depth explanation of how audio buffers work and what these parameters mean, read the short guide Audio buffers: frames, samples and channels.

+
+ +
+
numOfChannels
+
An integer defining the number of audio channels this buffer should contain.
+ A standards-conforming implementation must support at least 32 channels.
+
 
+
length
+
An integer representing the size of the buffer in sample frames.
+
sampleRate
+
The sample rate of the linear audio data, in sample frames per second. An implementation must support sample rates in at least the range 22050 to 96000. A quick sketch of how the three parameters interact follows below.
+
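The parameters relate as duration = length / sampleRate; an illustrative sketch of that arithmetic (not from the original examples):

var audioCtx = new AudioContext();
// 1 channel, a quarter of a second of frames at the context's own rate
var frames = audioCtx.sampleRate * 0.25;
var buffer = audioCtx.createBuffer(1, frames, audioCtx.sampleRate);
console.log(buffer.duration); // 0.25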
+ +

 

+ +

Return value

+ +

An {{domxref("AudioBuffer")}}.

+ +

Examples

+ +

First, a couple of simple trivial examples, to help explain how the parameters are used:

+ +
var audioCtx = new AudioContext();
+var buffer = audioCtx.createBuffer(2, 22050, 44100);
+ +

If you use this call, you will get a stereo buffer (two channels) that, when played back in an {{ domxref("AudioContext") }} running at 44100 Hz (very common — most normal sound cards run at this rate), will last for 0.5 seconds: 22050 frames / 44100 Hz = 0.5 seconds.

+ +
var audioCtx = new AudioContext();
+var buffer = audioCtx.createBuffer(1, 22050, 22050);
+ +

If you use this call, you will get a mono buffer (one channel) that, when played back in an {{ domxref("AudioContext") }} running at 44100 Hz, will be automatically *resampled* to 44100 Hz (and therefore yield 44100 frames), and will last for 1 second: 44100 frames / 44100 Hz = 1 second.

+ +
+

Note: Audio resampling is very similar to image resizing: say you have a 16 x 16 image, but you want it to fill a 32 x 32 area — you resize (resample) it. The result has lower quality (it can be blurry or jagged, depending on the resizing algorithm), but it works, and the resized image takes up less space than the full-size original. Resampled audio is exactly the same: you save space, but in practice you cannot properly reproduce high-frequency content (treble sounds).

+
+ +

Now let's look at a more complex example, in which we create a two-second buffer, fill it with white noise, and then play it via an {{ domxref("AudioBufferSourceNode") }}. The comments should explain clearly what is going on. You can run the code live, or view the source.

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var button = document.querySelector('button');
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+
+pre.innerHTML = myScript.innerHTML;
+
+// Stereo
+var channels = 2;
+// Create an empty two-second buffer at the sample rate of the AudioContext
+var frameCount = audioCtx.sampleRate * 2.0;
+
+var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);
+
+button.onclick = function() {
+  // Fill the buffer with white noise,
+  // i.e. random values between -1.0 and 1.0
+  for (var channel = 0; channel < channels; channel++) {
+   // This gives us the actual array that contains the data
+   var nowBuffering = myArrayBuffer.getChannelData(channel);
+   for (var i = 0; i < frameCount; i++) {
+     // Math.random() is in [0; 1.0]
+     // audio needs to be in [-1.0; 1.0]
+     nowBuffering[i] = Math.random() * 2 - 1;
+   }
+  }
+
+  // Get an AudioBufferSourceNode.
+  // This is the AudioNode to use when we want to play an AudioBuffer
+  var source = audioCtx.createBufferSource();
+  // Set the buffer in the AudioBufferSourceNode
+  source.buffer = myArrayBuffer;
+  // Connect the AudioBufferSourceNode to the
+  // destination so we can hear the sound
+  source.connect(audioCtx.destination);
+  // Start the source playing
+  source.start();
+}
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createBuffer-AudioBuffer-unsigned-long-numberOfChannels-unsigned-long-length-float-sampleRate', 'createBuffer()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0 {{property_prefix("webkit")}}
+ 22
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createbuffersource/index.html b/files/zh-cn/web/api/audiocontext/createbuffersource/index.html new file mode 100644 index 0000000000..5244513312 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createbuffersource/index.html @@ -0,0 +1,150 @@ +--- +title: AudioContext.createBufferSource() +slug: Web/API/AudioContext/createBufferSource +tags: + - API + - 音源 + - 音频源 + - 音频节点 +translation_of: Web/API/BaseAudioContext/createBufferSource +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createBufferSource() method is used to create a new {{ domxref("AudioBufferSourceNode") }}, which can be used to play audio data held in an {{ domxref("AudioBuffer") }} object. {{ domxref("AudioBuffer") }} objects are created with {{domxref("AudioContext.createBuffer")}}, or returned by {{domxref("AudioContext.decodeAudioData")}} after it successfully decodes an audio track.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var source = audioCtx.createBufferSource();
+ +

Returns

+ +

An {{domxref("AudioBufferSourceNode")}} object.

+ +

Example

+ +

In this example, we create a two-second buffer, fill it with white noise, and then play it via an {{ domxref("AudioBufferSourceNode") }}.

+ +
+

Note: You can also run the code live, or view the source.

+
+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var button = document.querySelector('button');
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+
+pre.innerHTML = myScript.innerHTML;
+
+// Stereo
+var channels = 2;
+// Create an empty two second stereo buffer at the
+// sample rate of the AudioContext
+var frameCount = audioCtx.sampleRate * 2.0;
+
+var myArrayBuffer = audioCtx.createBuffer(2, frameCount, audioCtx.sampleRate);
+
+button.onclick = function() {
+  // Fill the buffer with white noise;
+  //just random values between -1.0 and 1.0
+  for (var channel = 0; channel < channels; channel++) {
+   // This gives us the actual ArrayBuffer that contains the data
+   var nowBuffering = myArrayBuffer.getChannelData(channel);
+   for (var i = 0; i < frameCount; i++) {
+     // Math.random() is in [0; 1.0]
+     // audio needs to be in [-1.0; 1.0]
+     nowBuffering[i] = Math.random() * 2 - 1;
+   }
+  }
+
+  // Get an AudioBufferSourceNode.
+  // This is the AudioNode to use when we want to play an AudioBuffer
+  var source = audioCtx.createBufferSource();
+  // set the buffer in the AudioBufferSourceNode
+  source.buffer = myArrayBuffer;
+  // connect the AudioBufferSourceNode to the
+  // destination so we can hear the sound
+  source.connect(audioCtx.destination);
+  // start the source playing
+  source.start();
+}
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createBufferSource-AudioBufferSourceNode', 'createBufferSource()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createchannelmerger/index.html b/files/zh-cn/web/api/audiocontext/createchannelmerger/index.html new file mode 100644 index 0000000000..281dcddfe7 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createchannelmerger/index.html @@ -0,0 +1,143 @@ +--- +title: AudioContext.createChannelMerger() +slug: Web/API/AudioContext/createChannelMerger +tags: + - API + - Audio + - AudioContext + - Audio_Chinese +translation_of: Web/API/BaseAudioContext/createChannelMerger +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The AudioContext.createChannelMerger() method creates a ChannelMergerNode, which combines the channels of multiple audio streams into a single audio stream.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var merger = audioCtx.createChannelMerger(2);
+ +

Parameters

+ +
+
numberOfInputs
+
The number of channel inputs whose audio streams will be combined; the output stream will contain this many channels. The default value is 6.
+
+ +

Return value

+ +

A {{domxref("ChannelMergerNode")}}.

+ +

Example

+ +

The following example shows how you could separate a stereo track (say, a piece of music) and process the left and right channels differently. To use them, you need the second and third parameters of the AudioNode.connect(AudioNode) method, which specify the index of the channel in the source to connect from and the index of the channel in the destination to connect to.

+ +
var ac = new AudioContext();
+ac.decodeAudioData(someStereoBuffer, function(data) {
+ var source = ac.createBufferSource();
+ source.buffer = data;
+ var splitter = ac.createChannelSplitter(2);
+ source.connect(splitter);
+ var merger = ac.createChannelMerger(2);
+
+ // Reduce the volume of the left channel only
+ var gainNode = ac.createGain();
+ gainNode.gain.value = 0.5;
+ splitter.connect(gainNode, 0);
+
+ // Connect the splitter back to the second input of the merger: we
+ // effectively swap the channels, here, reversing the stereo image.
+ gainNode.connect(merger, 0, 1);
+ splitter.connect(merger, 1, 0);
+
+ var dest = ac.createMediaStreamDestination();
+
+ // Because we have used a ChannelMergerNode, we now have a stereo
+ // MediaStream we can use to pipe the Web Audio graph to WebRTC,
+ // MediaRecorder, etc.
+ merger.connect(dest);
+});
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createChannelMerger-ChannelMergerNode-unsigned-long-numberOfInputs', 'createChannelMerger()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html b/files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html new file mode 100644 index 0000000000..f46f5be2c5 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html @@ -0,0 +1,138 @@ +--- +title: AudioContext.createChannelSplitter() +slug: Web/API/AudioContext/createChannelSplitter +translation_of: Web/API/BaseAudioContext/createChannelSplitter +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createChannelSplitter() method of the {{ domxref("AudioContext") }} Interface is used to create a {{domxref("ChannelSplitterNode")}}, which is used to access the individual channels of an audio stream and process them separately.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var splitter = audioCtx.createChannelSplitter(2);
+ +

Parameters

+ +
+
numberOfOutputs
+
The number of channels you want the input audio stream to be split into; when no parameter is passed, it defaults to 6.
+
+ +

Returns

+ +

A {{domxref("ChannelSplitterNode")}}.

+ +

Example

+ +

The following simple example shows how you could separate a stereo track (say, a piece of music) and process the left and right channels differently. To use them, you need the second and third parameters of the {{domxref("AudioNode.connect(AudioNode)") }} method, which specify the index of the channel in the source to connect from and the index of the channel in the destination to connect to.

+ +
var ac = new AudioContext();
+ac.decodeAudioData(someStereoBuffer, function(data) {
+ var source = ac.createBufferSource();
+ source.buffer = data;
+ var splitter = ac.createChannelSplitter(2);
+ source.connect(splitter);
+ var merger = ac.createChannelMerger(2);
+
+ // Reduce the volume of the left channel only
+ var gainNode = ac.createGain();
+ gainNode.gain.value = 0.5;
+ splitter.connect(gainNode, 0);
+
+ // Connect the splitter back to the second input of the merger: we
+ // effectively swap the channels, here, reversing the stereo image.
+ gainNode.connect(merger, 0, 1);
+ splitter.connect(merger, 1, 0);
+
+ var dest = ac.createMediaStreamDestination();
+
+ // Because we have used a ChannelMergerNode, we now have a stereo
+ // MediaStream we can use to pipe the Web Audio graph to WebRTC,
+ // MediaRecorder, etc.
+ merger.connect(dest);
+});
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createChannelSplitter-ChannelSplitterNode-unsigned-long-numberOfOutputs', 'createChannelSplitter()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createconvolver/index.html b/files/zh-cn/web/api/audiocontext/createconvolver/index.html new file mode 100644 index 0000000000..2cbe395edc --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createconvolver/index.html @@ -0,0 +1,131 @@ +--- +title: AudioContext.createConvolver() +slug: Web/API/AudioContext/createConvolver +translation_of: Web/API/BaseAudioContext/createConvolver +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createConvolver() method of the {{ domxref("AudioContext") }} interface creates a {{ domxref("ConvolverNode") }}, commonly used to apply reverb effects to your audio. See the spec definition of Convolution for more information.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var convolver = audioCtx.createConvolver();
+ +

Return value

+ +

A {{domxref("ConvolverNode")}} object.

+ +

Example

+ +

The following example shows basic usage of an AudioContext to create a convolver node. The basic premise is that you create an AudioBuffer containing a sound sample to be used as the reverberation environment (called the impulse response) and apply it in the convolver. The example uses a short sample of a concert hall crowd, so the applied reverb effect is really deep and echoey.

+ +

For a more complete applied example, check out our Voice-change-O-matic demo (see app.js for the relevant code).

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var convolver = audioCtx.createConvolver();
+
+  ...
+
+// grab audio track via XHR for convolver node
+
+var soundSource, concertHallBuffer;
+
+ajaxRequest = new XMLHttpRequest();
+ajaxRequest.open('GET', 'concert-crowd.ogg', true);
+ajaxRequest.responseType = 'arraybuffer';
+
+ajaxRequest.onload = function() {
+  var audioData = ajaxRequest.response;
+  audioCtx.decodeAudioData(audioData, function(buffer) {
+      concertHallBuffer = buffer;
+      soundSource = audioCtx.createBufferSource();
+      soundSource.buffer = concertHallBuffer;
+    }, function(e){ console.log("Error with decoding audio data" + e.err); });
+}
+
+ajaxRequest.send();
+
+  ...
+
+convolver.buffer = concertHallBuffer;
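
The same loading can be sketched with fetch and the promise form of decodeAudioData (concert-crowd.ogg, audioCtx, convolver and concertHallBuffer are the names from the example above; this rewrite is illustrative, not part of the original demo):

fetch('concert-crowd.ogg')
  .then(function(response) { return response.arrayBuffer(); })
  .then(function(audioData) { return audioCtx.decodeAudioData(audioData); })
  .then(function(buffer) {
    // keep the decoded impulse response and apply it in the convolver
    concertHallBuffer = buffer;
    convolver.buffer = concertHallBuffer;
  });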
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createConvolver-ConvolverNode', 'createConvolver()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createdelay/index.html b/files/zh-cn/web/api/audiocontext/createdelay/index.html new file mode 100644 index 0000000000..b8e502758d --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createdelay/index.html @@ -0,0 +1,213 @@ +--- +title: AudioContext.createDelay() +slug: Web/API/AudioContext/createDelay +translation_of: Web/API/BaseAudioContext/createDelay +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createDelay() method of the {{ domxref("AudioContext") }} interface delays the incoming audio signal by a certain amount of time (for example, you could speak into a microphone and hear the words played back through the speakers a few seconds later).

+ +

 

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var synthDelay = audioCtx.createDelay(maxDelayTime);
+ +

Parameters

+ +
+
maxDelayTime
+
The maximum amount of delay that may be applied, in seconds.
+
+ +

Returns

+ +

A {{domxref("DelayNode")}}. The default {{domxref("DelayNode.delayTime")}} if no parameter is passed to createDelay() is 0 seconds.

+ +

 

+ +

Examples

+ +

First, a simple example in which the sound picked up by the microphone is delayed by three seconds before being played through the speakers:

+ +
window.AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext || window.msAudioContext;
+
+try { // Web Audio API entry points
+    var audioContext = new window.AudioContext();
+    var synthDelay = audioContext.createDelay(5.0);
+} catch (e) {
+    alert("Your browser does not support the Web Audio API");
+}
+
+
+var error = function (error) { alert("An error occurred"); };
+
+// Get the microphone
+if (navigator.getUserMedia) { // standard API
+  navigator.getUserMedia({ "audio": true },
+    function (stream) {
+      micto(stream); // do the actual work
+    }, error);
+} else if (navigator.webkitGetUserMedia) { // WebKit API
+  navigator.webkitGetUserMedia({ audio: true, video: false },
+    function (stream) {
+      micto(stream); // do the actual work
+    }, error);
+} else if (navigator.mozGetUserMedia) { // Firefox API
+  navigator.mozGetUserMedia({ "audio": true },
+    function (stream) {
+      micto(stream); // do the actual work
+    }, error);
+} else if (navigator.msGetUserMedia) { // IE API
+  navigator.msGetUserMedia({ "audio": true },
+    function (stream) {
+      micto(stream); // do the actual work
+    }, error);
+} else {
+  alert("Your browser does not support this API");
+}
+
+
+
+
+
+var micto = function(stream) {
+
+  synthDelay.delayTime.value = 3.0; // delay by 3 seconds
+
+  var source = audioContext.createMediaStreamSource(stream);
+
+  source.connect(synthDelay);
+
+  synthDelay.connect(audioContext.destination);
+
+}
+ 
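The prefixed getUserMedia() branches above reflect older browsers; in current browsers the same wiring can be sketched with navigator.mediaDevices:

navigator.mediaDevices.getUserMedia({ audio: true })
  .then(function(stream) {
    micto(stream); // do the actual work
  })
  .catch(function(err) {
    alert("An error occurred: " + err);
  });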
+ +

 

+ +


+ +

We have created a simple example that allows you to play three different samples on a constant loop — see create-delay (you can also view the source code). If you just press the play buttons, the loops will start immediately; if you slide the sliders up to the right, then press the play buttons, a delay will be introduced, so the looping sounds don't start playing for a short amount of time.

+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+
+var synthDelay = audioCtx.createDelay(5.0);
+
+  ...
+
+var synthSource;
+
+playSynth.onclick = function() {
+  synthSource = audioCtx.createBufferSource();
+  synthSource.buffer = buffers[2];
+  synthSource.loop = true;
+  synthSource.start();
+  synthSource.connect(synthDelay);
+  synthDelay.connect(destination);
+  this.setAttribute('disabled', 'disabled');
+}
+
+stopSynth.onclick = function() {
+  synthSource.disconnect(synthDelay);
+  synthDelay.disconnect(destination);
+  synthSource.stop();
+  playSynth.removeAttribute('disabled');
+}
+
+...
+
+var delay1;
+rangeSynth.oninput = function() {
+delay1 = rangeSynth.value;
+synthDelay.delayTime.value = delay1;
+}
+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createDelay-DelayNode-double-maxDelayTime', 'createDelay()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createmediaelementsource/index.html b/files/zh-cn/web/api/audiocontext/createmediaelementsource/index.html new file mode 100644 index 0000000000..9b7aec1420 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createmediaelementsource/index.html @@ -0,0 +1,167 @@ +--- +title: AudioContext.createMediaElementSource() +slug: Web/API/AudioContext/createMediaElementSource +translation_of: Web/API/AudioContext/createMediaElementSource +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createMediaElementSource() method of the {{ domxref("AudioContext") }} interface is used to create a new {{ domxref("MediaElementAudioSourceNode") }} object, given an existing HTML {{htmlelement("audio")}} or {{htmlelement("video")}} element; the audio from the element can then be played and manipulated.

+
+ +

For more details about media element audio source nodes, check out the {{ domxref("MediaElementAudioSourceNode") }} reference page.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var source = audioCtx.createMediaElementSource(myMediaElement);
+ +

Parameters

+ +
+
myMediaElement
+
An {{domxref("HTMLMediaElement")}} object that you want to feed into an audio processing graph to manipulate.
+
+ +

Return value

+ +

A {{domxref("MediaElementAudioSourceNode")}} object.

+ +

Example

+ +

This simple example creates a source from an {{htmlelement("audio") }} element using createMediaElementSource(), then passes the audio through a {{ domxref("GainNode") }} before feeding it into the {{ domxref("AudioDestinationNode") }} for playback. When the mouse pointer is moved, the updatePage() function is invoked, which calculates the current gain as a ratio of the mouse Y coordinate divided by the page height; you can therefore increase and decrease the volume of the playing music by moving the mouse pointer up and down.

+ +
+

Note: You can also view this example running live, or view the source.

+
+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var myAudio = document.querySelector('audio');
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+
+pre.innerHTML = myScript.innerHTML;
+
+// Create a MediaElementAudioSourceNode
+// Feed the HTMLMediaElement into it
+var source = audioCtx.createMediaElementSource(myAudio);
+
+// Create a gain node
+var gainNode = audioCtx.createGain();
+
+// Create variables to store mouse pointer Y coordinate
+// and HEIGHT of screen
+var CurY;
+var HEIGHT = window.innerHeight;
+
+// Get new mouse pointer coordinates when mouse is moved
+// then set new gain value
+
+document.onmousemove = updatePage;
+
+function updatePage(e) {
+    CurY = (window.Event) ? e.pageY : event.clientY + (document.documentElement.scrollTop ? document.documentElement.scrollTop : document.body.scrollTop);
+
+    gainNode.gain.value = CurY/HEIGHT;
+}
+
+
+// connect the AudioBufferSourceNode to the gainNode
+// and the gainNode to the destination, so we can play the
+// music and adjust the volume using the mouse cursor
+source.connect(gainNode);
+gainNode.connect(audioCtx.destination);
+ +
+

Note: As a consequence of calling createMediaElementSource(), audio playback from the {{domxref("HTMLMediaElement")}} is re-routed into the processing graph of the AudioContext, so playing/pausing the media can still be done through the media element API and the player controls.

+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createMediaElementSource-MediaElementAudioSourceNode-HTMLMediaElement-mediaElement', 'createMediaElementSource()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22
+

6.0{{property_prefix("webkit")}}

+ +

- buggy! not working -

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}} +

{{CompatUnknown}}

+ +

- buggy! not working -

+
33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createmediastreamdestination/index.html b/files/zh-cn/web/api/audiocontext/createmediastreamdestination/index.html new file mode 100644 index 0000000000..c934a2fd2d --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createmediastreamdestination/index.html @@ -0,0 +1,161 @@ +--- +title: AudioContext.createMediaStreamDestination() +slug: Web/API/AudioContext/createMediaStreamDestination +translation_of: Web/API/AudioContext/createMediaStreamDestination +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createMediaStreamDestination() method of the {{ domxref("AudioContext") }} interface is used to create a new {{domxref("MediaStreamAudioDestinationNode")}} object associated with a WebRTC {{domxref("MediaStream")}} representing an audio stream, which may be stored in a local file or sent to another computer.

+
+ +

The {{domxref("MediaStream")}} is created when the node is created and is accessible via the {{domxref("MediaStreamAudioDestinationNode")}}'s stream attribute. This stream can be used in a similar way as a MediaStream obtained via {{domxref("navigator.getUserMedia") }} — it can, for example, be sent to a remote peer using the RTCPeerConnection addStream() method.

+ +

For more details about media stream destination nodes, check out the {{domxref("MediaStreamAudioDestinationNode")}} reference page.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var destination = audioCtx.createMediaStreamDestination();
+ +

Return value

+ +

A {{domxref("MediaStreamAudioDestinationNode")}}.

+ +

Example

+ +

In the following simple example, we create a {{domxref("MediaStreamAudioDestinationNode")}}, an {{ domxref("OscillatorNode") }} and a {{ domxref("MediaRecorder") }} (the example will therefore only work in Firefox at this time.) The MediaRecorder is set up to record information from the MediaStreamDestinationNode.

+ +

When the button is clicked, the oscillator starts, and the MediaRecorder is started. When the button is stopped, the oscillator and MediaRecorder both stop. Stopping the MediaRecorder causes the dataavailable event to fire, and the event data is pushed into the chunks array. After that, the stop event fires, a new blob is made of type opus — which contains the data in the chunks array, and a new window (tab) is then opened that points to a URL created from the blob.

+ +

From here, you can play and save the opus file.

+ +
<!DOCTYPE html>
+<html>
+  <head>
+    <title>createMediaStreamDestination() demo</title>
+  </head>
+  <body>
+    <h1>createMediaStreamDestination() demo</h1>
+
+    <p>Encoding a pure sine wave to an Opus file </p>
+    <button>Make sine wave</button>
+    <audio controls></audio>
+    <script>
+     var b = document.querySelector("button");
+     var clicked = false;
+     var chunks = [];
+     var ac = new AudioContext();
+     var osc = ac.createOscillator();
+     var dest = ac.createMediaStreamDestination();
+     var mediaRecorder = new MediaRecorder(dest.stream);
+     osc.connect(dest);
+
+     b.addEventListener("click", function(e) {
+       if (!clicked) {
+           mediaRecorder.start();
+           osc.start(0);
+           e.target.innerHTML = "Stop recording";
+           clicked = true;
+         } else {
+           mediaRecorder.stop();
+           osc.stop(0);
+           e.target.disabled = true;
+         }
+     });
+
+     mediaRecorder.ondataavailable = function(evt) {
+       // push each chunk (blobs) in an array
+       chunks.push(evt.data);
+     };
+
+     mediaRecorder.onstop = function(evt) {
+       // Make blob out of our blobs, and open it.
+       var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
+       document.querySelector("audio").src = URL.createObjectURL(blob);
+     };
+    </script>
+  </body>
+</html>
+ +
+

Note: You can view this example live, or study the source code, on Github.

+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createMediaStreamDestination-MediaStreamAudioDestinationNode', 'createMediaStreamDestination()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createmediastreamsource/index.html b/files/zh-cn/web/api/audiocontext/createmediastreamsource/index.html new file mode 100644 index 0000000000..8ded5b30d6 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createmediastreamsource/index.html @@ -0,0 +1,180 @@ +--- +title: AudioContext.createMediaStreamSource() +slug: Web/API/AudioContext/createMediaStreamSource +translation_of: Web/API/AudioContext/createMediaStreamSource +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createMediaStreamSource() method of the {{ domxref("AudioContext") }} interface creates a new {{ domxref("MediaStreamAudioSourceNode") }} object, given a media stream (a {{ domxref("MediaStream") }} instance, say obtained from {{ domxref("navigator.getUserMedia") }}); the audio from the MediaStream can then be played and manipulated.

+
+ +

For more details about media stream audio source nodes, check out the {{ domxref("MediaStreamAudioSourceNode") }} reference page.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var source = audioCtx.createMediaStreamSource(stream);
+ +

Parameters

+ +
+
stream
+
A {{domxref("MediaStream")}} object that you want to feed into an audio processing graph to manipulate.
+
+ +

Returns

+ +

A {{domxref("MediaStreamAudioSourceNode")}}.

+ +

Example

+ +

In this example, we grab a media (audio + video) stream from {{ domxref("navigator.getUserMedia") }}, feed the media into a {{ htmlelement("video") }} element to play it, mute the audio, and then also feed the audio into a {{ domxref("MediaStreamAudioSourceNode") }}. Next, we feed this source audio into a {{ domxref("BiquadFilterNode") }} (which effectively serves as a bass booster), and finally into the {{domxref("AudioDestinationNode") }}.

+ +

The range slider below the {{ htmlelement("video") }} element controls how much gain the lowshelf filter applies — the higher the slider value, the more pronounced the bass.

+ +
+

Note: You can see this example running live, or view the source.

+
+ +
var pre = document.querySelector('pre');
+var video = document.querySelector('video');
+var myScript = document.querySelector('script');
+var range = document.querySelector('input');
+
+// Get a media stream with getUserMedia,
+// feed it into a MediaStreamAudioSourceNode,
+// and output it to a video element
+
+if (navigator.mediaDevices) {
+    console.log('getUserMedia supported.');
+    navigator.mediaDevices.getUserMedia ({audio: true, video: true})
+    .then(function(stream) {
+        video.srcObject = stream;
+        video.onloadedmetadata = function(e) {
+            video.play();
+            video.muted = true;
+        };
+
+        // Create a MediaStreamAudioSourceNode
+        // and feed the media stream into it
+        var audioCtx = new AudioContext();
+        var source = audioCtx.createMediaStreamSource(stream);
+
+        // Create a biquad filter
+        var biquadFilter = audioCtx.createBiquadFilter();
+        biquadFilter.type = "lowshelf";
+        biquadFilter.frequency.value = 1000;
+        biquadFilter.gain.value = range.value;
+
+        // Connect the source to the filter,
+        // and the filter to the destination,
+        // so we can hear the filtered stream
+        source.connect(biquadFilter);
+        biquadFilter.connect(audioCtx.destination);
+
+        // Update the filter gain whenever
+        // the slider value changes
+
+        range.oninput = function() {
+            biquadFilter.gain.value = range.value;
+        }
+    })
+    .catch(function(err) {
+        console.log('The following gUM error occured: ' + err);
+    });
+} else {
+   console.log('getUserMedia not supported on your browser!');
+}
+
+// dump script to pre element
+
+pre.innerHTML = myScript.innerHTML;
+ +
+

Note: As a consequence of calling createMediaStreamSource(), audio playback from the media stream is re-routed into the processing graph of the AudioContext, so playing/pausing the stream can still be done through the media element API and the player controls.

+
+ + + +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createMediaStreamSource-MediaStreamAudioSourceNode-MediaStream-mediaStream', 'createMediaStreamSource()')}}{{Spec2('Web Audio API')}}
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html b/files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html new file mode 100644 index 0000000000..7e505bc06a --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html @@ -0,0 +1,199 @@ +--- +title: AudioContext.createScriptProcessor() +slug: Web/API/AudioContext/createScriptProcessor +translation_of: Web/API/BaseAudioContext/createScriptProcessor +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createScriptProcessor() method of the {{ domxref("AudioContext") }} interface creates a {{domxref("ScriptProcessorNode")}} used for direct audio processing via JavaScript.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+myScriptProcessor = audioCtx.createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels);
+ +

Parameters

+ +
+
bufferSize
+
The buffer size, in units of sample frames. Specifically, it must be one of the following values: 256, 512, 1024, 2048, 4096, 8192, 16384. If it is not passed in, or if the value is 0, the implementation will choose the best buffer size for the given environment — a constant power of 2 that stays fixed for the lifetime of the node.
+
This value controls how frequently the audioprocess event is dispatched and how many sample frames are processed on each call. Lower bufferSize values give lower (better) latency; higher values help avoid audio breakup and glitches. It is recommended that authors not specify this buffer size and allow the implementation to pick a good value that balances latency against audio quality.
+
numberOfInputChannels
+
An integer specifying the number of channels for this node's input; the default is 2, and values of up to 32 are supported.
+
numberOfOutputChannels
+
An integer specifying the number of channels for this node's output; the default is 2, and values of up to 32 are supported.
+
+ +
+

Important: WebKit (version 31) requires that a valid bufferSize be passed when calling this method.

+
+ +
+

Note: numberOfInputChannels and numberOfOutputChannels may not both be zero; having both be zero is invalid.

+
+ +

Returns

+ +

A {{domxref("ScriptProcessorNode")}}.

+ +

Example

+ +

The following example shows basic usage of a ScriptProcessorNode: a track is loaded via {{ domxref("AudioContext.decodeAudioData") }}, a little white noise is added to every audio sample of the input, and the result is played through the {{domxref("AudioDestinationNode")}} (in effect, the system speakers). For each channel and each sample frame, the node's scriptNode.onaudioprocess function takes the associated audioProcessingEvent, loops through each channel of the input buffer and each sample within it, adds a little white noise, and writes the result to the corresponding output sample.

+ +
+

Note: For a complete working example, see our script-processor-node GitHub repo (and view the source code).

+
+ +
var myScript = document.querySelector('script');
+var myPre = document.querySelector('pre');
+var playButton = document.querySelector('button');
+
+// Create AudioContext and buffer source
+var audioCtx = new AudioContext();
+source = audioCtx.createBufferSource();
+
+// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
+var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
+console.log(scriptNode.bufferSize);
+
+// load in an audio track via XHR and decodeAudioData
+
+function getData() {
+  request = new XMLHttpRequest();
+  request.open('GET', 'viper.ogg', true);
+  request.responseType = 'arraybuffer';
+  request.onload = function() {
+    var audioData = request.response;
+
+    audioCtx.decodeAudioData(audioData, function(buffer) {
+    myBuffer = buffer;
+    source.buffer = myBuffer;
+  },
+    function(e){ console.log("Error with decoding audio data" + e.err); });
+  }
+  request.send();
+}
+
+// Give the node a function to process audio events
+scriptNode.onaudioprocess = function(audioProcessingEvent) {
+  // The input buffer is the song we loaded earlier
+  var inputBuffer = audioProcessingEvent.inputBuffer;
+
+  // The output buffer contains the samples that will be modified and played
+  var outputBuffer = audioProcessingEvent.outputBuffer;
+
+  // Loop through the output channels (in this case there is only one)
+  for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
+    var inputData = inputBuffer.getChannelData(channel);
+    var outputData = outputBuffer.getChannelData(channel);
+
+    // Loop through the 4096 samples
+    for (var sample = 0; sample < inputBuffer.length; sample++) {
+      // make output equal to the same as the input
+      outputData[sample] = inputData[sample];
+
+      // add noise to each output sample
+      outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
+    }
+  }
+}
+
+getData();
+
+// wire up play button
+playButton.onclick = function() {
+  source.connect(scriptNode);
+  scriptNode.connect(audioCtx.destination);
+  source.start();
+}
+
+// When the buffer source stops playing, disconnect everything
+source.onended = function() {
+  source.disconnect(scriptNode);
+  scriptNode.disconnect(audioCtx.destination);
+}
+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels', 'createScriptProcessor')}}{{Spec2('Web Audio API')}}
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/createwaveshaper/index.html b/files/zh-cn/web/api/audiocontext/createwaveshaper/index.html new file mode 100644 index 0000000000..7aef8d5688 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/createwaveshaper/index.html @@ -0,0 +1,133 @@ +--- +title: AudioContext.createWaveShaper() +slug: Web/API/AudioContext/createWaveShaper +translation_of: Web/API/BaseAudioContext/createWaveShaper +--- +

{{ APIRef("Web Audio API") }}

+ +

The createWaveShaper() method of the {{ domxref("AudioContext") }} interface creates a {{ domxref("WaveShaperNode") }}, which represents a non-linear distortion. The node is typically used to add a distortion effect to audio.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var distortion = audioCtx.createWaveShaper();
+ +

Returns

+ +

A {{domxref("WaveShaperNode")}}.

+ +

Example

+ +

The following example shows basic usage of an AudioContext to create a wave shaper node. For applied examples/information, check out our Voice-change-O-matic demo (see app.js for relevant code).

+ +
+

Note: Implementing a distortion curve is not a trivial matter, and you may need to hunt around for a suitable algorithm. We found the following distortion-curve code on Stack Overflow.

+
+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var distortion = audioCtx.createWaveShaper();
+
+  ...
+
+function makeDistortionCurve(amount) {
+  var k = typeof amount === 'number' ? amount : 50,
+    n_samples = 44100,
+    curve = new Float32Array(n_samples),
+    deg = Math.PI / 180,
+    i = 0,
+    x;
+  for ( ; i < n_samples; ++i ) {
+    x = i * 2 / n_samples - 1;
+    curve[i] = ( 3 + k ) * x * 20 * deg / ( Math.PI + k * Math.abs(x) );
+  }
+  return curve;
+};
+
+  ...
+
+distortion.curve = makeDistortionCurve(400);
+distortion.oversample = '4x';
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createWaveShaper-WaveShaperNode', 'createWaveShaper()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/currenttime/index.html b/files/zh-cn/web/api/audiocontext/currenttime/index.html new file mode 100644 index 0000000000..fbdaf4315c --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/currenttime/index.html @@ -0,0 +1,112 @@ +--- +title: AudioContext.currentTime +slug: Web/API/AudioContext/currentTime +translation_of: Web/API/BaseAudioContext/currentTime +--- +

{{ APIRef("Web Audio API") }}

+ +
+

currentTime is a read-only property of the {{ domxref("AudioContext") }} interface that returns a double representing an ever-increasing hardware timestamp, in seconds, starting from 0. It can be used to schedule audio playback, visualize timelines, and so on.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+console.log(audioCtx.currentTime);
+ +

Return value

+ +

A double.

+ +

Example

+ +
+

Note: For a full Web Audio example implementation, see one of our Web Audio demos on the MDN GitHub repo (for example panner-node), and try entering audioCtx.currentTime in your browser console.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+...
+
+console.log(audioCtx.currentTime);
+
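Because currentTime only ever increases, it is the clock to schedule against; a minimal scheduling sketch (the oscillator and the one-second offsets are illustrative):

var osc = audioCtx.createOscillator();
osc.connect(audioCtx.destination);
// start one second from now, stop one second after that
osc.start(audioCtx.currentTime + 1);
osc.stop(audioCtx.currentTime + 2);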
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-currentTime', 'currentTime')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html b/files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html new file mode 100644 index 0000000000..40693fd8cc --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html @@ -0,0 +1,223 @@ +--- +title: AudioContext.decodeAudioData() +slug: Web/API/AudioContext/decodeAudioData +tags: + - API + - Audio + - audio接口 + - 音频解码 +translation_of: Web/API/BaseAudioContext/decodeAudioData +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The decodeAudioData() method of the {{ domxref("AudioContext") }} interface asynchronously decodes audio file data contained in an {{domxref("ArrayBuffer")}}. The ArrayBuffer is usually obtained via {{domxref("XMLHttpRequest")}} or {{domxref("FileReader")}}. The audio is decoded into an AudioBuffer at the AudioContext's sample rate, and the result is then returned through a callback (or a promise).

+
+ +

This is the preferred method of creating an audio source for the Web Audio API from an audio track.

+ +

Syntax

+ +

Older callback syntax:

+ +
audioCtx.decodeAudioData(audioData, function(decodedData) {
+  // use the decoded data here
+});
+ +

Newer promise-based syntax:

+ +
audioCtx.decodeAudioData(audioData).then(function(decodedData) {
+  // use the decoded data here
+});
+ +

Examples

+ +

In this section we will first cover the older callback-based system, and then the newer promise-based syntax.

+ +

Older callback syntax

+ +

In this example, the getData() function uses XHR to load an audio track, setting the request's responseType to 'arraybuffer' so that it returns an ArrayBuffer, which is stored in the audioData variable. We then pass this buffer to decodeAudioData(); the success callback receives the decoded PCM data as a buffer, which we set on the {{ domxref("AudioBufferSourceNode") }} created with {{ domxref("AudioContext.createBufferSource()") }}, connect the source to {{domxref("AudioContext.destination") }}, and set it to loop.

+ +

The buttons run getData() to load and play the track, and stop() to stop it; once stop() has been called, the source is released.

+ +
+

Note: You can run the example live (or view the source.)

+
+ +
// define variables
+
+var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var source;
+
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+var play = document.querySelector('.play');
+var stop = document.querySelector('.stop');
+
+// use XHR to load an audio track, and
+// decodeAudioData to decode it and stick it in a buffer.
+// Then we put the buffer into the source
+
+function getData() {
+  source = audioCtx.createBufferSource();
+  var request = new XMLHttpRequest();
+
+  request.open('GET', 'viper.ogg', true);
+
+  request.responseType = 'arraybuffer';
+
+
+  request.onload = function() {
+    var audioData = request.response;
+
+    audioCtx.decodeAudioData(audioData, function(buffer) {
+        source.buffer = buffer;
+
+        source.connect(audioCtx.destination);
+        source.loop = true;
+      },
+
+      function(e){ console.error("Error with decoding audio data " + e.err); });
+
+  }
+
+  request.send();
+}
+
+// wire up buttons to stop and play audio
+
+play.onclick = function() {
+  getData();
+  source.start(0);
+  play.setAttribute('disabled', 'disabled');
+}
+
+stop.onclick = function() {
+  source.stop(0);
+  play.removeAttribute('disabled');
+}
+
+
+// dump script to pre element
+
+pre.innerHTML = myScript.innerHTML;
+ +

Newer promise-based syntax

+ +
ctx.decodeAudioData(compressedBuffer).then(function(decodedData) {
+ // use the decoded data here
+});
+ +
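In modern code the ArrayBuffer is usually fetched rather than loaded via XHR. A sketch of the whole promise chain, assuming the same hypothetical viper.ogg track as in the callback example above:

// fetch() the compressed bytes, decode them, then loop the result.
+fetch('viper.ogg')
+  .then(function(response) { return response.arrayBuffer(); })
+  .then(function(arrayBuffer) { return audioCtx.decodeAudioData(arrayBuffer); })
+  .then(function(decodedData) {
+    var source = audioCtx.createBufferSource();
+    source.buffer = decodedData;
+    source.connect(audioCtx.destination);
+    source.loop = true;
+    source.start(0);
+  });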

Parameters

+ +
+
ArrayBuffer
+
An ArrayBuffer containing the audio data to be decoded, usually obtained via {{domxref("XMLHttpRequest")}} or {{domxref("FileReader")}}.
+
DecodeSuccessCallback
+
A callback function invoked when the decoding successfully finishes. Its single argument is an AudioBuffer containing the decoded audio data.
+
DecodeErrorCallback
+
An optional error callback, invoked if an error occurs while decoding.
+
+ +

Returns

+ +

A {{domxref("Promise")}} object.

+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-decodeAudioData-Promise-AudioBuffer--ArrayBuffer-audioData-DecodeSuccessCallback-successCallback-DecodeErrorCallback-errorCallback', 'decodeAudioData()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
Promise-based syntax{{CompatChrome(49.0)}}{{CompatVersionUnknown}}{{CompatNo}}{{CompatVersionUnknown}}{{CompatNo}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidAndroid WebviewFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatChrome(33.0)}}
Promise-based syntax{{CompatUnknown}}{{CompatChrome(49.0)}}{{CompatVersionUnknown}}{{CompatVersionUnknown}}{{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}{{CompatChrome(49.0)}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/destination/index.html b/files/zh-cn/web/api/audiocontext/destination/index.html new file mode 100644 index 0000000000..04fdfe8247 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/destination/index.html @@ -0,0 +1,114 @@ +--- +title: AudioContext.destination +slug: Web/API/AudioContext/destination +translation_of: Web/API/BaseAudioContext/destination +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The destination property of the {{ domxref("AudioContext") }} returns an {{ domxref("AudioDestinationNode") }} representing the final destination of all audio in the context. It usually represents an actual audio-rendering device, such as your speakers.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+gainNode.connect(audioCtx.destination);
+ +

Return value

+ +

An {{ domxref("AudioDestinationNode") }}.

+ +

Examples

+ +
+

Note: For a full example, see one of the demos on the MDN Github repo, such as panner-node.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+var oscillatorNode = audioCtx.createOscillator();
+var gainNode = audioCtx.createGain();
+
+oscillatorNode.connect(gainNode);
+gainNode.connect(audioCtx.destination);
+
+ +
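Since destination is itself an {{ domxref("AudioDestinationNode") }}, its own properties are available as well. A small sketch, continuing from the code above:

// How many channels the underlying audio hardware can handle.
+console.log(audioCtx.destination.maxChannelCount);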

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-destination', 'destination')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/index.html b/files/zh-cn/web/api/audiocontext/index.html new file mode 100644 index 0000000000..f2b18cf432 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/index.html @@ -0,0 +1,107 @@ +--- +title: AudioContext +slug: Web/API/AudioContext +tags: + - API + - Audio + - AudioContext + - Web Audio API + - sound +translation_of: Web/API/AudioContext +--- +
{{APIRef("Web Audio API")}}
+ +

The AudioContext interface represents an audio-processing graph built from audio modules linked together, each represented by an {{domxref("AudioNode")}}. An audio context controls both the creation of the nodes it contains and the execution of audio processing or decoding. You need to create an AudioContext before you do anything else, as everything happens inside a context. It is recommended to create one AudioContext and reuse it rather than initializing a new one each time; a single AudioContext can be used for several different audio sources and pipelines concurrently.

+ +

{{InheritanceDiagram}}

+ +

Constructor

+ +
+
{{domxref("AudioContext.AudioContext", "AudioContext()")}}
+
Creates and returns a new AudioContext object.
+
+ +

Properties

+ +

Also inherits properties from its parent interface, {{domxref("BaseAudioContext")}}.

+ +
+
{{domxref("AudioContext.baseLatency")}} {{readonlyinline}} {{experimental_inline}}
+
Returns the number of seconds of processing latency incurred by the {{domxref("AudioContext")}} passing the audio from the {{domxref("AudioDestinationNode")}} to the audio subsystem.
+
{{domxref("AudioContext.outputLatency")}} {{readonlyinline}} {{experimental_inline}}
+
Returns an estimate of the output latency of the current audio context.
+
+ +

Methods

+ +

Also inherits methods from its parent interface, {{domxref("BaseAudioContext")}}.

+ +
+
{{domxref("AudioContext.close()")}}
+
Closes the audio context, releasing any system audio resources it uses.
+
{{domxref("AudioContext.createMediaElementSource()")}}
+
创建一个{{domxref("MediaElementAudioSourceNode")}}接口来关联{{domxref("HTMLMediaElement")}}. 这可以用来播放和处理来自{{HTMLElement("video")}}或{{HTMLElement("audio")}} 元素的音频。
+
{{domxref("AudioContext.createMediaStreamSource()")}}
+
创建一个{{domxref("MediaStreamAudioSourceNode")}}接口来关联可能来自本地计算机麦克风或其他来源的音频流{{domxref("MediaStream")}}。
+
{{domxref("AudioContext.createMediaStreamDestination()")}}
+
创建一个{{domxref("MediaStreamAudioDestinationNode")}}接口来关联可能储存在本地或已发送至其他计算机的{{domxref("MediaStream")}}音频。
+
{{domxref("AudioContext.createMediaStreamTrackSource()")}}
+
创建一个{{domxref("MediaStreamTrackAudioSourceNode")}},它与一个{{domxref("MediaStream")}}相关联,表示一个媒体流轨迹。
+
{{domxref("AudioContext.getOutputTimestamp()")}}
+
Returns a new AudioTimestamp object containing two audio timestamp values relating to the current audio context.
+
{{domxref("AudioContext.resume()")}}
+
Resumes the progression of time in an audio context that has previously been suspended.
+
{{domxref("AudioContext.suspend()")}}
+
Suspends the progression of time in the audio context, temporarily halting audio hardware access and reducing CPU/battery usage in the process.
+
+ +

Examples

+ +

Basic declaration:

+ +
var audioCtx = new AudioContext();
+ +

Cross-browser variant:

+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+
+var oscillatorNode = audioCtx.createOscillator();
+var gainNode = audioCtx.createGain();
+var finish = audioCtx.destination;
+// etc.
+ +
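As a sketch of one of the factory methods listed above, the following routes an existing {{HTMLElement("audio")}} element (the selector is hypothetical) through the graph built in the previous snippet:

// Feed an <audio> element into the gain node and on to the speakers.
+var myAudio = document.querySelector('audio');
+var source = audioCtx.createMediaElementSource(myAudio);
+source.connect(gainNode);
+gainNode.connect(finish);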

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#AudioContext-section', 'AudioContext')}}{{Spec2('Web Audio API')}}
+ +

Browser compatibility

+ + + +

{{Compat("api.AudioContext")}}

+ +
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/listener/index.html b/files/zh-cn/web/api/audiocontext/listener/index.html new file mode 100644 index 0000000000..81b2a730a2 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/listener/index.html @@ -0,0 +1,112 @@ +--- +title: AudioContext.listener +slug: Web/API/AudioContext/listener +translation_of: Web/API/BaseAudioContext/listener +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The listener property of the {{ domxref("AudioContext") }} returns an {{ domxref("AudioListener") }} object, which can then be used to implement 3D audio spatialization.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var myListener = audioCtx.listener;
+ +

Return value

+ +

An {{ domxref("AudioListener") }} object.

+ +

Examples

+ +
+

Note: For a complete audio spatialization example, see the panner-node demo.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+...
+
+var myListener = audioCtx.listener;
+
+ +
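Once retrieved, the listener can be positioned and oriented in 3D space. A minimal sketch using the long-standing setter methods:

// Place the listener at the origin, facing down the negative z-axis,
+// with the positive y-axis as "up".
+myListener.setPosition(0, 0, 0);
+myListener.setOrientation(0, 0, -1, 0, 1, 0);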

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-listener', 'listener')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html b/files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html new file mode 100644 index 0000000000..2b7022c1ce --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html @@ -0,0 +1,95 @@ +--- +title: AudioContext.mozAudioChannelType +slug: Web/API/AudioContext/mozAudioChannelType +translation_of: Web/API/AudioContext/mozAudioChannelType +--- +

{{APIRef("Web Audio API")}} {{Non-standard_header}}

+ +

{{domxref("AudioContext")}}的mozAudioChannelType属性是只读的,在Firefox OS设备上可以用来设置音频在audio context中播放的声道。

+ +

This is a non-standard property defined in the AudioChannels API; see Using the AudioChannels API for more information.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var myAudioChannelType = audioCtx.mozAudioChannelType;
+
+ +

The audio channel of an AudioContext can only be set via the constructor, like this:

+ +
var audioCtx = new AudioContext('ringer');
+ +
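Reading the property back then returns the channel that was set in the constructor. A sketch (non-standard, Firefox OS only):

console.log(audioCtx.mozAudioChannelType); // expected to log "ringer"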

Return value

+ +

A {{domxref("DOMString")}} value.

+ +

Examples

+ +

TBD

+ +

Specifications

+ +

The AudioChannels API currently has no official spec; see https://wiki.mozilla.org/WebAPI/AudioChannels, the WebIDL, and so on for implementation details.

+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
General support{{CompatNo}}{{CompatNo}}{{CompatNo}}{{CompatNo}}{{CompatNo}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidChromeFirefox Mobile (Gecko)Firefox OSIE PhoneOpera MobileSafari Mobile
General support{{CompatNo}}{{CompatNo}}{{CompatNo}}1.2{{CompatNo}}{{CompatNo}}{{CompatNo}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/onstatechange/index.html b/files/zh-cn/web/api/audiocontext/onstatechange/index.html new file mode 100644 index 0000000000..ee9b3f21c0 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/onstatechange/index.html @@ -0,0 +1,101 @@ +--- +title: AudioContext.onstatechange +slug: Web/API/AudioContext/onstatechange +translation_of: Web/API/BaseAudioContext/onstatechange +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The onstatechange property of the {{ domxref("AudioContext") }} defines an event handler function that is invoked when a {{Event("statechange")}} event fires, that is, whenever the audio context's state changes.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+audioCtx.onstatechange = function() { ... };
+ +

Examples

+ +

The following snippet is taken from our AudioContext states demo (run it live); the onstatechange handler logs the current {{domxref("state")}} to the console every time it changes.

+ +
audioCtx.onstatechange = function() {
+  console.log(audioCtx.state);
+}
+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-onstatechange', 'onstatechange')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(43.0)}}{{CompatGeckoDesktop(40.0)}} {{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/resume/index.html b/files/zh-cn/web/api/audiocontext/resume/index.html new file mode 100644 index 0000000000..6491b15d4e --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/resume/index.html @@ -0,0 +1,119 @@ +--- +title: AudioContext.resume() +slug: Web/API/AudioContext/resume +tags: + - AudioContext + - Web Audio API + - resume +translation_of: Web/API/AudioContext/resume +--- +

{{ APIRef("Web Audio API") }}

+ +

The resume() method of the {{ domxref("AudioContext") }} resumes the progression of time in a previously suspended audio context.

+ +

Calling it on an {{domxref("OfflineAudioContext")}} raises an INVALID_STATE_ERR error.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+audioCtx.resume().then(function() { ... });
+
+ +

Returns

+ +

{{jsxref("Promise")}}成功的话返回空值,返回失败是因为context已经关闭了。

+ +

Examples

+ +

The following snippet is taken from our AudioContext states demo (see it running live). When the suspend/resume button is clicked, {{domxref("AudioContext.state")}} is queried: if it is running, {{domxref("suspend()")}} is called; if it is suspended, resume() is called. In each case, the button's text label is updated as appropriate once the promise resolves.

+ +
susresBtn.onclick = function() {
+  if(audioCtx.state === 'running') {
+    audioCtx.suspend().then(function() {
+      susresBtn.textContent = 'Resume context';
+    });
+  } else if(audioCtx.state === 'suspended') {
+    audioCtx.resume().then(function() {
+      susresBtn.textContent = 'Suspend context';
+    });
+  }
+}
+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-resume-Promise-void', 'resume()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(41.0)}}{{CompatGeckoDesktop(40.0)}}{{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatChrome(41.0)}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/samplerate/index.html b/files/zh-cn/web/api/audiocontext/samplerate/index.html new file mode 100644 index 0000000000..b811702e26 --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/samplerate/index.html @@ -0,0 +1,112 @@ +--- +title: AudioContext.sampleRate +slug: Web/API/AudioContext/sampleRate +translation_of: Web/API/BaseAudioContext/sampleRate +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The sampleRate property of the {{ domxref("AudioContext") }} returns a floating-point number representing the sample rate, in samples per second, used by all nodes in the context. Sample-rate conversion is not supported, so every node in a given AudioContext runs at the same rate.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var mySampleRate = audioCtx.sampleRate;
+ +

Return value

+ +

A floating point number.

+ +

Examples

+ +
+

Note: For a full Web Audio example, see one of the Web Audio demos on the MDN Github repo, such as panner-node. Try entering audioCtx.sampleRate in your browser console.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+...
+
+console.log(audioCtx.sampleRate);
+
+ +
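The sample rate is handy for sizing buffers in seconds rather than frames. A small sketch, continuing from the code above:

// A two-second stereo buffer at the context's own sample rate.
+var myBuffer = audioCtx.createBuffer(2, audioCtx.sampleRate * 2, audioCtx.sampleRate);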

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-sampleRate', 'sampleRate')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/state/index.html b/files/zh-cn/web/api/audiocontext/state/index.html new file mode 100644 index 0000000000..97876f5d3d --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/state/index.html @@ -0,0 +1,111 @@ +--- +title: AudioContext.state +slug: Web/API/AudioContext/state +translation_of: Web/API/BaseAudioContext/state +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The state read-only property of the {{ domxref("AudioContext") }} returns the current state of the AudioContext.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var myState = audioCtx.state;
+ +

Return value

+ +

{{domxref("DOMString")}},可能的值如下:

suspended: the audio context has been suspended (with the suspend() method).
+ running: the audio context is running normally.
+ closed: the audio context has been closed (with the close() method).

Examples

+ +

The following snippet is taken from our AudioContext states demo (run it live); the {{domxref("AudioContext.onstatechange")}} handler logs the current state to the console every time it changes.

+ +
audioCtx.onstatechange = function() {
+  console.log(audioCtx.state);
+}
+
+ +
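The state is also useful as a guard before calling state-changing methods. A tiny sketch:

// Only suspend a context that is actually running.
+if (audioCtx.state === 'running') {
+  audioCtx.suspend();
+}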

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-state', 'state')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(43.0)}}{{CompatGeckoDesktop(40.0)}} {{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/audiocontext/suspend/index.html b/files/zh-cn/web/api/audiocontext/suspend/index.html new file mode 100644 index 0000000000..1e01cff97a --- /dev/null +++ b/files/zh-cn/web/api/audiocontext/suspend/index.html @@ -0,0 +1,115 @@ +--- +title: AudioContext.suspend() +slug: Web/API/AudioContext/suspend +translation_of: Web/API/AudioContext/suspend +--- +

{{ APIRef("Web Audio API") }}

+ +

The suspend() method of the {{ domxref("AudioContext") }} interface suspends the progression of time in the audio context, temporarily halting audio hardware access and reducing CPU/battery usage in the process. This is useful when an app is not going to use the audio context for a while and wants to cut down on hardware resource usage.

+ +

Calling it on an {{domxref("OfflineAudioContext")}} throws an INVALID_STATE_ERR error.

+ +

Syntax

+ +
var audioCtx = new AudioContext();
+audioCtx.suspend().then(function() { ... });
+
+ +

Returns

+ +

A {{jsxref("Promise")}} that resolves with void. The promise is rejected if the context has already been closed.

+ +

Example

+ +

The following snippet is taken from our AudioContext states demo (see it running live.) When the suspend/resume button is clicked, the {{domxref("AudioContext.state")}} is queried — if it is running, suspend() is called; if it is suspended, {{domxref("resume")}} is called. In each case, the text label of the button is updated as appropriate once the promise resolves.

+ +
susresBtn.onclick = function() {
+  if(audioCtx.state === 'running') {
+    audioCtx.suspend().then(function() {
+      susresBtn.textContent = 'Resume context';
+    });
+  } else if(audioCtx.state === 'suspended') {
+    audioCtx.resume().then(function() {
+      susresBtn.textContent = 'Suspend context';
+    });
+  }
+}
+
+ +

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-suspend-Promise-void', 'suspend()')}}{{Spec2('Web Audio API')}}
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(41.0)}}{{CompatGeckoDesktop(40.0)}}{{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatChrome(41.0)}}
+
+ +

See also

+ + -- cgit v1.2.3-54-g00ecf