From 310fd066e91f454b990372ffa30e803cc8120975 Mon Sep 17 00:00:00 2001 From: Florian Merz Date: Thu, 11 Feb 2021 12:56:40 +0100 Subject: unslug zh-cn: move --- .../api/baseaudiocontext/createanalyser/index.html | 154 ++++++++++++++ .../baseaudiocontext/createbiquadfilter/index.html | 139 +++++++++++++ .../api/baseaudiocontext/createbuffer/index.html | 181 +++++++++++++++++ .../baseaudiocontext/createbuffersource/index.html | 150 ++++++++++++++ .../createchannelmerger/index.html | 143 +++++++++++++ .../createchannelsplitter/index.html | 138 +++++++++++++ .../baseaudiocontext/createconvolver/index.html | 131 ++++++++++++ .../api/baseaudiocontext/createdelay/index.html | 213 ++++++++++++++++++++ .../createscriptprocessor/index.html | 199 ++++++++++++++++++ .../baseaudiocontext/createwaveshaper/index.html | 133 ++++++++++++ .../api/baseaudiocontext/currenttime/index.html | 112 +++++++++++ .../baseaudiocontext/decodeaudiodata/index.html | 223 +++++++++++++++++++++ .../api/baseaudiocontext/destination/index.html | 114 +++++++++++ .../web/api/baseaudiocontext/listener/index.html | 112 +++++++++++ .../api/baseaudiocontext/onstatechange/index.html | 101 ++++++++++ .../web/api/baseaudiocontext/samplerate/index.html | 112 +++++++++++ .../web/api/baseaudiocontext/state/index.html | 111 ++++++++++ 17 files changed, 2466 insertions(+) create mode 100644 files/zh-cn/web/api/baseaudiocontext/createanalyser/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createbiquadfilter/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createbuffer/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createbuffersource/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createchannelmerger/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createchannelsplitter/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createconvolver/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createdelay/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createscriptprocessor/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/createwaveshaper/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/currenttime/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/decodeaudiodata/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/destination/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/listener/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/onstatechange/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/samplerate/index.html create mode 100644 files/zh-cn/web/api/baseaudiocontext/state/index.html (limited to 'files/zh-cn/web/api/baseaudiocontext') diff --git a/files/zh-cn/web/api/baseaudiocontext/createanalyser/index.html b/files/zh-cn/web/api/baseaudiocontext/createanalyser/index.html new file mode 100644 index 0000000000..2d00a8a100 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createanalyser/index.html @@ -0,0 +1,154 @@ +--- +title: AudioContext.createAnalyser() +slug: Web/API/AudioContext/createAnalyser +translation_of: Web/API/BaseAudioContext/createAnalyser +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的createAnalyser()方法能创建一个{{ domxref("AnalyserNode") }},可以用来获取音频时间和频率数据,以及实现数据可视化。

+
+ +
+

Note: For more information about this node, see the {{domxref("AnalyserNode")}} page.

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var analyser = audioCtx.createAnalyser();
+ +

返回值

+ +

{{domxref("AnalyserNode")}}对象

+ +

例子

+ +

The following example shows basic usage of an AudioContext to create an analyser node, then requestAnimationFrame() is used to repeatedly collect time-domain data and draw an "oscilloscope style" view of the current audio input. For a more complete example, see our Voice-change-O-matic demo (see app.js lines 128–205 for the relevant code).

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var analyser = audioCtx.createAnalyser();
+
+  ...
+
+analyser.fftSize = 2048;
+var bufferLength = analyser.fftSize;
+var dataArray = new Uint8Array(bufferLength);
+analyser.getByteTimeDomainData(dataArray);
+
+// draw an oscilloscope of the current audio source
+
+function draw() {
+
+      drawVisual = requestAnimationFrame(draw);
+
+      analyser.getByteTimeDomainData(dataArray);
+
+      canvasCtx.fillStyle = 'rgb(200, 200, 200)';
+      canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
+
+      canvasCtx.lineWidth = 2;
+      canvasCtx.strokeStyle = 'rgb(0, 0, 0)';
+
+      canvasCtx.beginPath();
+
+      var sliceWidth = WIDTH * 1.0 / bufferLength;
+      var x = 0;
+
+      for(var i = 0; i < bufferLength; i++) {
+
+        var v = dataArray[i] / 128.0;
+        var y = v * HEIGHT/2;
+
+        if(i === 0) {
+          canvasCtx.moveTo(x, y);
+        } else {
+          canvasCtx.lineTo(x, y);
+        }
+
+        x += sliceWidth;
+      }
+
+      canvasCtx.lineTo(canvas.width, canvas.height/2);
+      canvasCtx.stroke();
+    };
+
+    draw();
+ +
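The analyser can also report frequency-domain data via getByteFrequencyData(). The short sketch below is not part of the demo above; it reuses the audioCtx and analyser variables from the example and assumes a source has already been connected to the analyser.

// Frequency-domain sketch (illustrative only, reuses audioCtx and analyser from above)
analyser.fftSize = 256;
var frequencyBufferLength = analyser.frequencyBinCount; // half of fftSize
var frequencyData = new Uint8Array(frequencyBufferLength);

function logSpectrum() {
  requestAnimationFrame(logSpectrum);
  // Fills the array with magnitude values in the range 0-255
  analyser.getByteFrequencyData(frequencyData);
  // Each bin spans (audioCtx.sampleRate / 2) / frequencyBufferLength Hz
  console.log(frequencyData[0]);
}

logSpectrum();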

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createAnalyser-AnalyserNode', 'createAnalyser()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createbiquadfilter/index.html b/files/zh-cn/web/api/baseaudiocontext/createbiquadfilter/index.html new file mode 100644 index 0000000000..fa5884ad71 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createbiquadfilter/index.html @@ -0,0 +1,139 @@ +--- +title: AudioContext.createBiquadFilter() +slug: Web/API/AudioContext/createBiquadFilter +tags: + - API + - EQ + - Web Audio API + - 参考 + - 方法 + - 滤波器 +translation_of: Web/API/BaseAudioContext/createBiquadFilter +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }} 的createBiquadFilter() 方法创建了一个  {{ domxref("BiquadFilterNode") }}, 它提供了一个可以指定多个不同的一般滤波器类型的双二阶滤波器。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var biquadFilter = audioCtx.createBiquadFilter();
+ +

返回

+ +

A {{domxref("BiquadFilterNode")}}.

+ +

示例

+ +

This example shows how to use an AudioContext to create a biquad filter node. For a complete working example, check out our Voice-change-O-matic demo (and see its source code).

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+
+//set up the different audio nodes we will use for the app
+var analyser = audioCtx.createAnalyser();
+var distortion = audioCtx.createWaveShaper();
+var gainNode = audioCtx.createGain();
+var biquadFilter = audioCtx.createBiquadFilter();
+var convolver = audioCtx.createConvolver();
+
+// connect the nodes together
+
+source = audioCtx.createMediaStreamSource(stream);
+source.connect(analyser);
+analyser.connect(distortion);
+distortion.connect(biquadFilter);
+biquadFilter.connect(convolver);
+convolver.connect(gainNode);
+gainNode.connect(audioCtx.destination);
+
+// Manipulate the Biquad filter
+
+biquadFilter.type = "lowshelf";
+biquadFilter.frequency.value = 1000;
+biquadFilter.gain.value = 25;
+ +
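Besides lowshelf, the type property accepts the other standard biquad filter types (lowpass, highpass, bandpass, highshelf, peaking, notch, allpass). A minimal sketch with illustrative values, reusing the biquadFilter created above (not part of the demo):

// Band-pass filter centred on 440 Hz with a fairly narrow band
biquadFilter.type = "bandpass";
biquadFilter.frequency.value = 440;
biquadFilter.Q.value = 10;

// Or a high-pass filter that cuts rumble below 200 Hz:
// biquadFilter.type = "highpass";
// biquadFilter.frequency.value = 200;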

规格

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createBiquadFilter-BiquadFilterNode', 'createBiquadFilter()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0 {{property_prefix("webkit")}}
+ 22
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

 

+ +

相关

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createbuffer/index.html b/files/zh-cn/web/api/baseaudiocontext/createbuffer/index.html new file mode 100644 index 0000000000..2d29213737 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createbuffer/index.html @@ -0,0 +1,181 @@ +--- +title: AudioContext.createBuffer() +slug: Web/API/AudioContext/createBuffer +tags: + - 创建音频片段 + - 接口 + - 方法 + - 音频环境 +translation_of: Web/API/BaseAudioContext/createBuffer +--- +

The createBuffer() method of the {{ domxref("AudioContext") }} interface is used to create a new, empty {{ domxref("AudioBuffer") }} object, which can then be populated with data and played via an {{ domxref("AudioBufferSourceNode") }}.

+ +

For more details about audio buffers, see the {{ domxref("AudioBuffer") }} reference page.

+ +
+

Note: createBuffer() used to be able to take compressed audio data and return decoded samples, but this ability was removed from the specification, because all the decoding was done on the main thread, so createBuffer() was blocking other code execution. The asynchronous method decodeAudioData() does the same job: it takes compressed audio, such as an MP3 file, and directly gives you back an {{ domxref("AudioBuffer") }} that you can then play via an {{ domxref("AudioBufferSourceNode") }}. For playing back compressed formats such as MP3, use decodeAudioData() instead.

+
+ +

语法

+ +
AudioContext.createBuffer(Number numOfChannels, Number length, Number sampleRate);
+ +

参数

+ +
+

Note: For an in-depth explanation of how audio buffers work, and what these parameters mean, read the short guide Audio buffers: frames, samples and channels.

+
+ +
+
numOfChannels
+
An integer defining the number of audio channels this buffer should contain.
+ Implementations must support at least 32 channels.
+
 
+
length
+
An integer representing the size of the buffer in sample-frames.
+
sampleRate
+
The sample rate of the linear audio data in sample-frames per second. Implementations must support sample rates in at least the range 22050 to 96000.
+
+ +

 

+ +

返回值

+ +

An {{domxref("AudioBuffer")}}.

+ +

示例

+ +

First, a couple of simple examples, to help explain how the parameters are used:

+ +
var audioCtx = new AudioContext();
+var buffer = audioCtx.createBuffer(2, 22050, 44100);
+ +

If you use this call, you will get a stereo buffer (two channels) that, when played back in an AudioContext running at 44100 Hz (very common, as most normal sound cards run at this rate), will last for 0.5 seconds: 22050 frames / 44100 Hz = 0.5 seconds.

+ +
var audioCtx = new AudioContext();
+var buffer = audioCtx.createBuffer(1, 22050, 22050);
+ +

If you use this call, you will get a mono buffer (one channel) that, when played back in an AudioContext running at 44100 Hz, will be automatically resampled to 44100 Hz (and therefore yield 44100 frames), and will last for 1.0 second: 44100 frames / 44100 Hz = 1 second.

+ +
+

Note: Audio resampling is very similar to image resizing. Say you have a 16 x 16 image, but you want it to fill a 32 x 32 area: you scale (resample) it. The result has lower quality (it can be blurry or edgy, depending on the resampling algorithm), but it works, and the resized image takes up less space than an image of the same size captured natively. Resampled audio is exactly the same: you save some space, but in practice you cannot properly reproduce high-frequency content (treble).

+
+ +
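In other words, a buffer's duration is always length / sampleRate seconds, and the buffer is resampled if its sample rate differs from that of the context. A small sketch of that arithmetic (illustrative only):

var audioCtx = new AudioContext();

// A buffer that should last exactly 0.25 seconds at the context's own rate
var seconds = 0.25;
var frameCount = Math.round(audioCtx.sampleRate * seconds);
var buffer = audioCtx.createBuffer(2, frameCount, audioCtx.sampleRate);

console.log(buffer.duration); // ~0.25, i.e. buffer.length / buffer.sampleRate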

Now let's look at a more complex example, where we create a two-second buffer, fill it with white noise, and then play it via an {{ domxref("AudioBufferSourceNode") }}. The comments should explain clearly what is going on. You can also run the code live, or view the source.

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var button = document.querySelector('button');
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+
+pre.innerHTML = myScript.innerHTML;
+
+// Stereo
+var channels = 2;
+// Create an empty two-second buffer at the sample rate of the AudioContext
+var frameCount = audioCtx.sampleRate * 2.0;
+
+var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);
+
+button.onclick = function() {
+  // Fill the buffer with white noise;
+  // just random values between -1.0 and 1.0
+  for (var channel = 0; channel < channels; channel++) {
+   // This gives us the actual array that contains the data
+   var nowBuffering = myArrayBuffer.getChannelData(channel);
+   for (var i = 0; i < frameCount; i++) {
+     // Math.random() is in [0; 1.0]
+     // audio needs to be in [-1.0; 1.0]
+     nowBuffering[i] = Math.random() * 2 - 1;
+   }
+  }
+
+  // Get an AudioBufferSourceNode.
+  // This is the AudioNode to use when we want to play an AudioBuffer
+  var source = audioCtx.createBufferSource();
+  // Set the buffer in the AudioBufferSourceNode
+  source.buffer = myArrayBuffer;
+  // Connect the AudioBufferSourceNode to the
+  // destination so we can hear the sound
+  source.connect(audioCtx.destination);
+  // Start the source playing
+  source.start();
+}
+ +

规范

+ + + + + + + + + + + + + + +
规范现状备注
{{SpecName('Web Audio API', '#widl-AudioContext-createBuffer-AudioBuffer-unsigned-long-numberOfChannels-unsigned-long-length-float-sampleRate', 'createBuffer()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0 {{property_prefix("webkit")}}
+ 22
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

相关链接

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createbuffersource/index.html b/files/zh-cn/web/api/baseaudiocontext/createbuffersource/index.html new file mode 100644 index 0000000000..5244513312 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createbuffersource/index.html @@ -0,0 +1,150 @@ +--- +title: AudioContext.createBufferSource() +slug: Web/API/AudioContext/createBufferSource +tags: + - API + - 音源 + - 音频源 + - 音频节点 +translation_of: Web/API/BaseAudioContext/createBufferSource +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createBufferSource() method of the {{ domxref("AudioContext") }} interface is used to create a new {{ domxref("AudioBufferSourceNode") }}, which can be used to play audio data contained within an {{ domxref("AudioBuffer") }} object. An {{ domxref("AudioBuffer") }} is created with {{domxref("AudioContext.createBuffer")}}, or returned by {{domxref("AudioContext.decodeAudioData")}} when it successfully decodes an audio track.

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var source = audioCtx.createBufferSource();
+ +

返回

+ +

An {{domxref("AudioBufferSourceNode")}}.

+ +

例子

+ +

In this example, we create a two-second buffer, fill it with white noise, and then play it via an {{ domxref("AudioBufferSourceNode") }}.

+ +
+

Note: You can also run the code live, or view the source.

+
+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var button = document.querySelector('button');
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+
+pre.innerHTML = myScript.innerHTML;
+
+// Stereo
+var channels = 2;
+// Create an empty two second stereo buffer at the
+// sample rate of the AudioContext
+var frameCount = audioCtx.sampleRate * 2.0;
+
+var myArrayBuffer = audioCtx.createBuffer(2, frameCount, audioCtx.sampleRate);
+
+button.onclick = function() {
+  // Fill the buffer with white noise;
+  //just random values between -1.0 and 1.0
+  for (var channel = 0; channel < channels; channel++) {
+   // This gives us the actual ArrayBuffer that contains the data
+   var nowBuffering = myArrayBuffer.getChannelData(channel);
+   for (var i = 0; i < frameCount; i++) {
+     // Math.random() is in [0; 1.0]
+     // audio needs to be in [-1.0; 1.0]
+     nowBuffering[i] = Math.random() * 2 - 1;
+   }
+  }
+
+  // Get an AudioBufferSourceNode.
+  // This is the AudioNode to use when we want to play an AudioBuffer
+  var source = audioCtx.createBufferSource();
+  // set the buffer in the AudioBufferSourceNode
+  source.buffer = myArrayBuffer;
+  // connect the AudioBufferSourceNode to the
+  // destination so we can hear the sound
+  source.connect(audioCtx.destination);
+  // start the source playing
+  source.start();
+}
+ +
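Once a buffer is attached, the {{ domxref("AudioBufferSourceNode") }} also exposes a few playback options. A minimal sketch with illustrative values, reusing audioCtx and myArrayBuffer from the example above:

var source2 = audioCtx.createBufferSource();
source2.buffer = myArrayBuffer;

source2.playbackRate.value = 0.5; // play at half speed
source2.loop = true;              // keep looping the buffer

source2.connect(audioCtx.destination);
// start(when, offset, duration): start now, one second into the buffer
source2.start(audioCtx.currentTime, 1);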

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createBufferSource-AudioBufferSourceNode', 'createBufferSource()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器支持

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createchannelmerger/index.html b/files/zh-cn/web/api/baseaudiocontext/createchannelmerger/index.html new file mode 100644 index 0000000000..281dcddfe7 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createchannelmerger/index.html @@ -0,0 +1,143 @@ +--- +title: AudioContext.createChannelMerger() +slug: Web/API/AudioContext/createChannelMerger +tags: + - API + - Audio + - AudioContext + - Audio_Chinese +translation_of: Web/API/BaseAudioContext/createChannelMerger +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The AudioContext.createChannelMerger() method creates a ChannelMergerNode, which combines the channels from several audio streams into a single audio stream.

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var merger = audioCtx.createChannelMerger(2);
+ +

参数

+ +
+
numberOfInputs
+
The number of channel inputs to merge into the output stream; the output will contain this many channels. The default value is 6.
+
+ +

返回值

+ +

A {{domxref("ChannelMergerNode")}}.

+ +

Example

+ +

The following example shows how you could separate a stereo track (say, a piece of music) and process the left and right channels differently. To do that, you use the second and third parameters of the AudioNode.connect(AudioNode) method, which specify, respectively, the index of the channel to connect from and the index of the channel to connect to.

+ +
var ac = new AudioContext();
+ac.decodeAudioData(someStereoBuffer, function(data) {
+ var source = ac.createBufferSource();
+ source.buffer = data;
+ var splitter = ac.createChannelSplitter(2);
+ source.connect(splitter);
+ var merger = ac.createChannelMerger(2);
+
+ // Reduce the volume of the left channel only
+ var gainNode = ac.createGain();
+ gainNode.gain.value = 0.5;
+ splitter.connect(gainNode, 0);
+
+ // Connect the splitter back to the second input of the merger: we
+ // effectively swap the channels, here, reversing the stereo image.
+ gainNode.connect(merger, 0, 1);
+ splitter.connect(merger, 1, 0);
+
+ var dest = ac.createMediaStreamDestination();
+
+ // Because we have used a ChannelMergerNode, we now have a stereo
+ // MediaStream we can use to pipe the Web Audio graph to WebRTC,
+ // MediaRecorder, etc.
+ merger.connect(dest);
+});
+ +
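Another way to see what the merger does is to build a stereo stream out of two independent mono sources. The sketch below (not from the page's demo) sends one oscillator to the left channel and another to the right:

var audioCtx = new AudioContext();
var merger = audioCtx.createChannelMerger(2);

var left = audioCtx.createOscillator();
var right = audioCtx.createOscillator();
left.frequency.value = 440;
right.frequency.value = 660;

// connect(destination, outputIndex, inputIndex)
left.connect(merger, 0, 0);  // becomes channel 0 (left)
right.connect(merger, 0, 1); // becomes channel 1 (right)

merger.connect(audioCtx.destination);
left.start();
right.start();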

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createChannelMerger-ChannelMergerNode-unsigned-long-numberOfInputs', 'createChannelMerger()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

相关页面

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createchannelsplitter/index.html b/files/zh-cn/web/api/baseaudiocontext/createchannelsplitter/index.html new file mode 100644 index 0000000000..f46f5be2c5 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createchannelsplitter/index.html @@ -0,0 +1,138 @@ +--- +title: AudioContext.createChannelSplitter() +slug: Web/API/AudioContext/createChannelSplitter +translation_of: Web/API/BaseAudioContext/createChannelSplitter +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createChannelSplitter() method of the {{ domxref("AudioContext") }} Interface is used to create a {{domxref("ChannelSplitterNode")}}, which is used to access the individual channels of an audio stream and process them separately.

+
+ +

Syntax

+ +
var audioCtx = new AudioContext();
+var splitter = audioCtx.createChannelSplitter(2);
+ +

参数

+ +
+
numberOfOutputs
+
The number of channels you want the input audio split into; when no parameter is passed, it defaults to 6.
+
+ +

Returns

+ +

A {{domxref("ChannelSplitterNode")}}.

+ +

Example

+ +

The following simple example shows how you could split a stereo track (for example, a piece of music) and process the left and right channels differently. To do that, you use the second and third parameters of the {{domxref("AudioNode.connect(AudioNode)") }} method, which specify the index of the channel to connect from and the index of the channel to connect to.

+ +
var ac = new AudioContext();
+ac.decodeAudioData(someStereoBuffer, function(data) {
+ var source = ac.createBufferSource();
+ source.buffer = data;
+ var splitter = ac.createChannelSplitter(2);
+ source.connect(splitter);
+ var merger = ac.createChannelMerger(2);
+
+ // Reduce the volume of the left channel only
+ var gainNode = ac.createGain();
+ gainNode.gain.value = 0.5;
+ splitter.connect(gainNode, 0);
+
+ // Connect the splitter back to the second input of the merger: we
+ // effectively swap the channels, here, reversing the stereo image.
+ gainNode.connect(merger, 0, 1);
+ splitter.connect(merger, 1, 0);
+
+ var dest = ac.createMediaStreamDestination();
+
+ // Because we have used a ChannelMergerNode, we now have a stereo
+ // MediaStream we can use to pipe the Web Audio graph to WebRTC,
+ // MediaRecorder, etc.
+ merger.connect(dest);
+});
+ +
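A splitter can also be used to inspect each channel separately, for example for per-channel level metering. The sketch below is illustrative only and assumes source is an existing stereo {{domxref("AudioNode")}} (for instance a MediaStream or media element source):

var audioCtx = new AudioContext();
// 'source' is assumed to be an existing stereo AudioNode
var splitter = audioCtx.createChannelSplitter(2);
var leftAnalyser = audioCtx.createAnalyser();
var rightAnalyser = audioCtx.createAnalyser();

source.connect(splitter);
splitter.connect(leftAnalyser, 0);  // channel 0 -> left analyser
splitter.connect(rightAnalyser, 1); // channel 1 -> right analyser
// Each analyser can now be polled independently, e.g. with getByteTimeDomainData()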

规格

+ + + + + + + + + + + + + + +
规格状态注释
{{SpecName('Web Audio API', '#widl-AudioContext-createChannelSplitter-ChannelSplitterNode-unsigned-long-numberOfOutputs', 'createChannelSplitter()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createconvolver/index.html b/files/zh-cn/web/api/baseaudiocontext/createconvolver/index.html new file mode 100644 index 0000000000..2cbe395edc --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createconvolver/index.html @@ -0,0 +1,131 @@ +--- +title: AudioContext.createConvolver() +slug: Web/API/AudioContext/createConvolver +translation_of: Web/API/BaseAudioContext/createConvolver +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的方法createConvolver()能创建一个{{ domxref("ConvolverNode") }},通常用来对你的音频应用混响效果。在 Convolution规范定义 中查看更多信息。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var convolver = audioCtx.createConvolver();
+ +

返回值

+ +

{{domxref("ConvolverNode")}}对象。

+ +

例子

+ +

The following example shows basic usage of an AudioContext to create a convolver node. The basic premise is that you create an AudioBuffer containing a sound sample to be used as the ambience to shape the reverb (called the impulse response) and apply that to the convolver. The example uses a short sample of a concert hall crowd, so the reverb effect applied is really deep and echoey.

+ +

For a more complete applied example, see our Voice-change-O-matic demo (and app.js for the relevant code).

+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var convolver = audioCtx.createConvolver();
+
+  ...
+
+// grab audio track via XHR for convolver node
+
+var soundSource, concertHallBuffer;
+
+ajaxRequest = new XMLHttpRequest();
+ajaxRequest.open('GET', 'concert-crowd.ogg', true);
+ajaxRequest.responseType = 'arraybuffer';
+
+ajaxRequest.onload = function() {
+  var audioData = ajaxRequest.response;
+  audioCtx.decodeAudioData(audioData, function(buffer) {
+      concertHallBuffer = buffer;
+      soundSource = audioCtx.createBufferSource();
+      soundSource.buffer = concertHallBuffer;
+    }, function(e){"Error with decoding audio data" + e.err});
+}
+
+ajaxRequest.send();
+
+  ...
+
+convolver.buffer = concertHallBuffer;
+ +
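In newer code the impulse response is often loaded with fetch() and the promise form of decodeAudioData() instead of XHR. A minimal sketch, reusing audioCtx and convolver from above (the file name is the same placeholder used in the example):

fetch('concert-crowd.ogg')
  .then(function(response) { return response.arrayBuffer(); })
  .then(function(arrayBuffer) { return audioCtx.decodeAudioData(arrayBuffer); })
  .then(function(decodedData) {
    convolver.buffer = decodedData;       // the convolver now applies this reverb
    convolver.connect(audioCtx.destination);
    // a source still needs to be routed through it: someSource.connect(convolver)
  });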

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createConvolver-ConvolverNode', 'createConvolver()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createdelay/index.html b/files/zh-cn/web/api/baseaudiocontext/createdelay/index.html new file mode 100644 index 0000000000..b8e502758d --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createdelay/index.html @@ -0,0 +1,213 @@ +--- +title: AudioContext.createDelay() +slug: Web/API/AudioContext/createDelay +translation_of: Web/API/BaseAudioContext/createDelay +--- +

{{ APIRef("Web Audio API") }}

+ +
+

The createDelay() method of the {{ domxref("AudioContext") }} interface is used to delay the incoming audio signal by a certain amount of time (for example, you could speak into a microphone and hear your words played back through the speakers a few seconds later).

+ +

 

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var synthDelay = audioCtx.createDelay(maxDelayTime);
+ +

参数

+ +
+
maxDelayTime
+
The maximum amount of delay, in seconds, that can be applied to the signal.
+
+ +

返回

+ +

A {{domxref("DelayNode")}}. The default {{domxref("DelayNode.delayTime")}} if no parameter is passed to createDelay() is 0 seconds.

+ +


+ +

 

+ +

示例

+ +

First, a short example in which the sound captured by the microphone is played back from the speakers with a 3-second delay:

+ +
window.AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext || window.msAudioContext;
+
+try { // Web Audio API setup
+    var audioContext = new window.AudioContext();
+    var synthDelay = audioContext.createDelay(5.0);
+} catch (e) {
+    alert("Your browser does not support the Web Audio API");
+}
+
+
+var error = function (error) { alert("An error occurred"); };
+
+// Get the microphone input
+if (navigator.getUserMedia) { // standard API
+ navigator.getUserMedia({ "audio": true },
+ function (stream) {
+ micto(stream);    // do the actual work
+                   }, error);
+}else if(navigator.webkitGetUserMedia) {   // webkit API
+ navigator.webkitGetUserMedia({audio:true, video:  false },
+ function (stream) {
+  micto(stream); // do the actual work
+                   }, error);
+ }else if (navigator.mozGetUserMedia) {  // Firefox API
+ navigator.mozGetUserMedia({ "audio": true },
+ function (stream) {
+  micto(stream);// do the actual work
+                   }, error);
+ }else if (navigator.msGetUserMedia) { // IE API
+ navigator.msGetUserMedia({ "audio": true },
+ function (stream) {
+  micto(stream);// do the actual work
+                   }, error);
+ } else {
+   alert("Your browser does not support this API");
+}
+
+
+
+
+
+ var micto = function(stream) {
+
+  synthDelay.delayTime.value = 3.0;   // delay by 3 seconds
+
+  var source = audioContext.createMediaStreamSource(stream);
+
+  source.connect(synthDelay);
+
+  synthDelay.connect(audioContext.destination);
+
+      }
+ 
+ +

 

+ +

A second, more complete example follows.

+ +

We have created a simple example that allows you to play three different samples on a constant loop — see create-delay (you can also view the source code). If you just press the play buttons, the loops will start immediately; if you slide the sliders up to the right, then press the play buttons, a delay will be introduced, so the looping sounds don't start playing for a short amount of time.

+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+
+var synthDelay = audioCtx.createDelay(5.0);
+
+  ...
+
+var synthSource;
+
+playSynth.onclick = function() {
+  synthSource = audioCtx.createBufferSource();
+  synthSource.buffer = buffers[2];
+  synthSource.loop = true;
+  synthSource.start();
+  synthSource.connect(synthDelay);
+  synthDelay.connect(destination);
+  this.setAttribute('disabled', 'disabled');
+}
+
+stopSynth.onclick = function() {
+  synthSource.disconnect(synthDelay);
+  synthDelay.disconnect(destination);
+  synthSource.stop();
+  playSynth.removeAttribute('disabled');
+}
+
+...
+
+var delay1;
+rangeSynth.oninput = function() {
+delay1 = rangeSynth.value;
+synthDelay.delayTime.value = delay1;
+}
+
+ +
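A DelayNode is also commonly combined with a GainNode in a feedback loop to produce a repeating echo. A minimal sketch with illustrative values; someSource is assumed to be an existing source node (oscillator, buffer source, etc.):

var audioCtx = new AudioContext();
var delay = audioCtx.createDelay(5.0);
var feedback = audioCtx.createGain();

delay.delayTime.value = 0.4;   // 400 ms between echoes
feedback.gain.value = 0.5;     // each echo is half as loud as the previous one

// Feedback loop: delay -> gain -> back into delay
delay.connect(feedback);
feedback.connect(delay);

// someSource is assumed to exist already
someSource.connect(delay);
someSource.connect(audioCtx.destination); // dry signal
delay.connect(audioCtx.destination);      // echoes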

Specifications

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createDelay-DelayNode-double-maxDelayTime', 'createDelay()')}}{{Spec2('Web Audio API')}} 
+ +

Browser compatibility

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createscriptprocessor/index.html b/files/zh-cn/web/api/baseaudiocontext/createscriptprocessor/index.html new file mode 100644 index 0000000000..7e505bc06a --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createscriptprocessor/index.html @@ -0,0 +1,199 @@ +--- +title: AudioContext.createScriptProcessor() +slug: Web/API/AudioContext/createScriptProcessor +translation_of: Web/API/BaseAudioContext/createScriptProcessor +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }} 接口的createScriptProcessor() 方法创建一个{{domxref("ScriptProcessorNode")}} 用于通过JavaScript直接处理音频.

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+myScriptProcessor = audioCtx.createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels);
+ +

参数

+ +
+
bufferSize
+
The buffer size, in units of sample-frames. If specified, it must be one of the following values: 256, 512, 1024, 2048, 4096, 8192, 16384. If it is not passed in, or if the value is 0, the implementation will choose the best buffer size for the given environment, which will be a constant power of two throughout the lifetime of the node.
+
This value controls how frequently the audioprocess event is dispatched and how many sample-frames need to be processed on each call. Lower values for bufferSize result in lower (better) latency; higher values are necessary to avoid audio breakup and glitches. It is recommended that authors not specify this buffer size and instead allow the implementation to pick a good value that balances latency and audio quality.
+
numberOfInputChannels
+
An integer specifying the number of channels of the node's input; the default is 2, and the maximum is 32.
+
numberOfOutputChannels
+
An integer specifying the number of channels of the node's output; the default is 2, and the maximum is 32.
+
+ +
+

Important: WebKit (version 31) currently requires that a valid bufferSize be passed when calling this method.

+
+ +
+

Note: It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero.

+
+ +
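As a minimal illustration of these parameters, the sketch below creates a mono processor with a 256-frame buffer that simply copies its input to its output at half volume. This is only a sketch; the full example further down is the one taken from the page's demo.

var audioCtx = new AudioContext();
// 256-frame buffer, 1 input channel, 1 output channel
var processor = audioCtx.createScriptProcessor(256, 1, 1);

processor.onaudioprocess = function(event) {
  var input = event.inputBuffer.getChannelData(0);
  var output = event.outputBuffer.getChannelData(0);
  for (var i = 0; i < input.length; i++) {
    output[i] = input[i] * 0.5; // pass the signal through at half volume
  }
};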

返回

+ +

A {{domxref("ScriptProcessorNode")}}.

+ +

示例

+ +

The following example shows basic usage of a ScriptProcessorNode to take a track loaded via {{ domxref("AudioContext.decodeAudioData") }}, process it by adding a bit of white noise to each audio sample of the input track, and play it through the {{domxref("AudioDestinationNode")}} (in effect, the system speakers). For each channel and each sample frame, the scriptNode.onaudioprocess function takes the associated audioProcessingEvent, uses it to loop through each channel of the input buffer and each sample within each channel, adds a small amount of white noise, and writes the result as the output sample in each case.

+ +
+

Note: For a full working example, see our script-processor-node GitHub repo (and view the source code).

+
+ +
var myScript = document.querySelector('script');
+var myPre = document.querySelector('pre');
+var playButton = document.querySelector('button');
+
+// Create AudioContext and buffer source
+var audioCtx = new AudioContext();
+source = audioCtx.createBufferSource();
+
+// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
+var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
+console.log(scriptNode.bufferSize);
+
+// load in an audio track via XHR and decodeAudioData
+
+function getData() {
+  request = new XMLHttpRequest();
+  request.open('GET', 'viper.ogg', true);
+  request.responseType = 'arraybuffer';
+  request.onload = function() {
+    var audioData = request.response;
+
+    audioCtx.decodeAudioData(audioData, function(buffer) {
+    myBuffer = buffer;
+    source.buffer = myBuffer;
+  },
+    function(e){"Error with decoding audio data" + e.err});
+  }
+  request.send();
+}
+
+// Give the node a function to process audio events
+scriptNode.onaudioprocess = function(audioProcessingEvent) {
+  // The input buffer is the song we loaded earlier
+  var inputBuffer = audioProcessingEvent.inputBuffer;
+
+  // The output buffer contains the samples that will be modified and played
+  var outputBuffer = audioProcessingEvent.outputBuffer;
+
+  // Loop through the output channels (in this case there is only one)
+  for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
+    var inputData = inputBuffer.getChannelData(channel);
+    var outputData = outputBuffer.getChannelData(channel);
+
+    // Loop through the 4096 samples
+    for (var sample = 0; sample < inputBuffer.length; sample++) {
+      // make output equal to the same as the input
+      outputData[sample] = inputData[sample];
+
+      // add noise to each output sample
+      outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
+    }
+  }
+}
+
+getData();
+
+// wire up play button
+playButton.onclick = function() {
+  source.connect(scriptNode);
+  scriptNode.connect(audioCtx.destination);
+  source.start();
+}
+
+// When the buffer source stops playing, disconnect everything
+source.onended = function() {
+  source.disconnect(scriptNode);
+  scriptNode.disconnect(audioCtx.destination);
+}
+
+ +

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels', 'createScriptProcessor')}}{{Spec2('Web Audio API')}}
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/createwaveshaper/index.html b/files/zh-cn/web/api/baseaudiocontext/createwaveshaper/index.html new file mode 100644 index 0000000000..7aef8d5688 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/createwaveshaper/index.html @@ -0,0 +1,133 @@ +--- +title: AudioContext.createWaveShaper() +slug: Web/API/AudioContext/createWaveShaper +translation_of: Web/API/BaseAudioContext/createWaveShaper +--- +

{{ APIRef("Web Audio API") }}

+ +

{{ domxref("AudioContext") }} 接口的createWaveShaper()方法创建了 表示非线性失真的{{ domxref("WaveShaperNode") }}。该节点通常被用来给音频添加失真效果

+ +

语法

+ +
var audioCtx = new AudioContext();
+var distortion = audioCtx.createWaveShaper();
+ +

返回

+ +

A {{domxref("WaveShaperNode")}}.

+ +

例子

+ +

The following example shows basic usage of an AudioContext to create a wave shaper node. For applied examples/information, check out our Voice-change-O-matic demo (see app.js for relevant code).

+ +


+ +
+

:实现失真曲线并不是简单的事情,你可能需要到处找资料来找到这样的算法。我们在Stack Overflow上找到了以下的失真曲线代码

+
+ +
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var distortion = audioCtx.createWaveShaper();
+
+  ...
+
+function makeDistortionCurve(amount) {
+  var k = typeof amount === 'number' ? amount : 50,
+    n_samples = 44100,
+    curve = new Float32Array(n_samples),
+    deg = Math.PI / 180,
+    i = 0,
+    x;
+  for ( ; i < n_samples; ++i ) {
+    x = i * 2 / n_samples - 1;
+    curve[i] = ( 3 + k ) * x * 20 * deg / ( Math.PI + k * Math.abs(x) );
+  }
+  return curve;
+};
+
+  ...
+
+distortion.curve = makeDistortionCurve(400);
+distortion.oversample = '4x';
+ +
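Any Float32Array can be used as the curve. For instance, a common alternative is a simple tanh-based soft-clipping curve; the sketch below is illustrative only and not part of the demo above (it reuses the distortion node created earlier):

function makeSoftClipCurve(amount) {
  var n_samples = 44100;
  var curve = new Float32Array(n_samples);
  for (var i = 0; i < n_samples; i++) {
    var x = i * 2 / n_samples - 1;    // map the index to the range [-1, 1]
    curve[i] = Math.tanh(amount * x); // gentle saturation
  }
  return curve;
}

distortion.curve = makeSoftClipCurve(3);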

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-createWaveShaper-WaveShaperNode', 'createWaveShaper()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + +
FeatureChromeEdgeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatVersionUnknown}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidEdgeFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/currenttime/index.html b/files/zh-cn/web/api/baseaudiocontext/currenttime/index.html new file mode 100644 index 0000000000..fbdaf4315c --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/currenttime/index.html @@ -0,0 +1,112 @@ +--- +title: AudioContext.currentTime +slug: Web/API/AudioContext/currentTime +translation_of: Web/API/BaseAudioContext/currentTime +--- +

{{ APIRef("Web Audio API") }}

+ +
+

currentTime is a read-only property of the {{ domxref("AudioContext") }} interface. It returns a double representing an ever-increasing hardware timestamp in seconds, starting at 0, which can be used for scheduling audio playback, visualizing timelines, and so on.

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+console.log(audioCtx.currentTime);
+ +

返回值

+ +

A double.

+ +

例子

+ +
+

Note: For a full Web Audio example implementation, see one of our Web Audio demos on the MDN GitHub repo, such as panner-node. Also try entering audioCtx.currentTime in your browser console.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+...
+
+console.log(audioCtx.currentTime);
+
+ +
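Because currentTime only ever moves forward, it is the reference clock for scheduling. A minimal sketch, reusing the audioCtx created above, that starts a tone one second from now and stops it a second later:

var osc = audioCtx.createOscillator();
osc.connect(audioCtx.destination);

var now = audioCtx.currentTime;
osc.start(now + 1); // start one second from now
osc.stop(now + 2);  // play for one second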

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-currentTime', 'currentTime')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/decodeaudiodata/index.html b/files/zh-cn/web/api/baseaudiocontext/decodeaudiodata/index.html new file mode 100644 index 0000000000..40693fd8cc --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/decodeaudiodata/index.html @@ -0,0 +1,223 @@ +--- +title: AudioContext.decodeAudioData() +slug: Web/API/AudioContext/decodeAudioData +tags: + - API + - Audio + - audio接口 + - 音频解码 +translation_of: Web/API/BaseAudioContext/decodeAudioData +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}接口的decodeAudioData()方法可用于异步解码音频文件中的 {{domxref("ArrayBuffer")}}. ArrayBuffer数据可以通过{{domxref("XMLHttpRequest")}}和{{domxref("FileReader")}}来获取. AudioBuffer是通过AudioContext采样率进行解码的,然后通过回调返回结果.

+
+ +

This is the preferred method of creating an audio source for the Web Audio API from an audio track.

+ +

语法

+ +

Older callback syntax:

+ +
audioCtx.decodeAudioData(audioData, function(decodedData) {
+  // use the dec​oded data here
+});
+ +

Newer promise-based syntax:

+ +
audioCtx.decodeAudioData(audioData).then(function(decodedData) {
+  // use the decoded data here
+});
+ +

举例

+ +

In this section we will first cover the older callback-based system, and then the newer promise-based syntax.

+ +

Older callback syntax

+ +

In this example, the getData() function uses XHR to load an audio track, setting the responseType of the request to arraybuffer so that it returns an array buffer as its response, which is stored in the audioData variable. We then pass this buffer to decodeAudioData(); the success callback takes the successfully decoded PCM data, puts it into an {{ domxref("AudioBufferSourceNode") }} created with {{ domxref("AudioContext.createBufferSource()") }}, connects the source to the {{domxref("AudioContext.destination") }}, and sets it to loop.

+ +

The buttons run getData() to load the track and start it playing, and stop() to stop playback. When stop() is called, the source is cleared out.

+ +
+

Note: You can run the example live (or view the source.)

+
+ +
// define variables
+
+var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+var source;
+
+var pre = document.querySelector('pre');
+var myScript = document.querySelector('script');
+var play = document.querySelector('.play');
+var stop = document.querySelector('.stop');
+
+// use XHR to load an audio track, and
+// decodeAudioData to decode it and stick it in a buffer.
+// Then we put the buffer into the source
+
+function getData() {
+  source = audioCtx.createBufferSource();
+  var request = new XMLHttpRequest();
+
+  request.open('GET', 'viper.ogg', true);
+
+  request.responseType = 'arraybuffer';
+
+
+  request.onload = function() {
+    var audioData = request.response;
+
+    audioCtx.decodeAudioData(audioData, function(buffer) {
+        source.buffer = buffer;
+
+        source.connect(audioCtx.destination);
+        source.loop = true;
+      },
+
+      function(e){"Error with decoding audio data" + e.err});
+
+  }
+
+  request.send();
+}
+
+// wire up buttons to stop and play audio
+
+play.onclick = function() {
+  getData();
+  source.start(0);
+  play.setAttribute('disabled', 'disabled');
+}
+
+stop.onclick = function() {
+  source.stop(0);
+  play.removeAttribute('disabled');
+}
+
+
+// dump script to pre element
+
+pre.innerHTML = myScript.innerHTML;
+ +

Newer promise-based syntax

+ +
ctx.decodeAudioData(compressedBuffer).then(function(decodedData) {
+ // use the decoded data here
+});
+ +
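In browsers that support the promise form, the same call can also be written with fetch() and async/await. A minimal sketch; the file name is the same placeholder used in the example above:

async function loadTrack(audioCtx, url) {
  const response = await fetch(url);
  const arrayBuffer = await response.arrayBuffer();
  const decodedData = await audioCtx.decodeAudioData(arrayBuffer);

  const source = audioCtx.createBufferSource();
  source.buffer = decodedData;
  source.connect(audioCtx.destination);
  return source; // call source.start() when ready to play
}

// loadTrack(audioCtx, 'viper.ogg').then(function(source) { source.start(); });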

参数

+ +
+
ArrayBuffer
+
An ArrayBuffer containing the audio data to be decoded, usually loaded from an {{domxref("XMLHttpRequest")}} or {{domxref("FileReader")}}.
+
DecodeSuccessCallback
+
A callback function to be invoked when the decoding successfully finishes. Its single argument is an AudioBuffer representing the decoded PCM audio data.
+
DecodeErrorCallback
+
An optional error callback, invoked if an error occurs while the audio data is being decoded.
+
+ +

返回

+ +

一个 {{domxref("Promise") }}对象.

+ +

标准

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-decodeAudioData-Promise-AudioBuffer--ArrayBuffer-audioData-DecodeSuccessCallback-successCallback-DecodeErrorCallback-errorCallback', 'decodeAudioData()')}}{{Spec2('Web Audio API')}} 
+ +

浏览器支持

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
Promise-based syntax{{CompatChrome(49.0)}}{{CompatVersionUnknown}}{{CompatNo}}{{CompatVersionUnknown}}{{CompatNo}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidAndroid WebviewFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatVersionUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatChrome(33.0)}}
Promise-based syntax{{CompatUnknown}}{{CompatChrome(49.0)}}{{CompatVersionUnknown}}{{CompatVersionUnknown}}{{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}{{CompatChrome(49.0)}}
+
+ +

See also

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/destination/index.html b/files/zh-cn/web/api/baseaudiocontext/destination/index.html new file mode 100644 index 0000000000..04fdfe8247 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/destination/index.html @@ -0,0 +1,114 @@ +--- +title: AudioContext.destination +slug: Web/API/AudioContext/destination +translation_of: Web/API/BaseAudioContext/destination +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的destination属性返回一个{{ domxref("AudioDestinationNode") }}表示context中所有音频(节点)的最终目标节点,一般是音频渲染设备,比如扬声器。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+gainNode.connect(audioCtx.destination);
+ +

返回值

+ +

An {{ domxref("AudioDestinationNode") }}.

+ +

例子

+ +
+

Note: For a full example implementation, see one of our Web Audio demos on the MDN GitHub repo, such as panner-node.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+var oscillatorNode = audioCtx.createOscillator();
+var gainNode = audioCtx.createGain();
+
+oscillatorNode.connect(gainNode);
+gainNode.connect(audioCtx.destination);
+
+ +
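The returned {{ domxref("AudioDestinationNode") }} also reports how many channels the underlying hardware can handle via its maxChannelCount property. A short sketch, reusing the audioCtx created above (illustrative only):

var maxChannels = audioCtx.destination.maxChannelCount;
console.log('The audio device supports up to ' + maxChannels + ' channels');

// If the hardware allows it, ask for more than the default stereo output
if (maxChannels > 2) {
  audioCtx.destination.channelCount = maxChannels;
}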

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-destination', 'destination')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/listener/index.html b/files/zh-cn/web/api/baseaudiocontext/listener/index.html new file mode 100644 index 0000000000..81b2a730a2 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/listener/index.html @@ -0,0 +1,112 @@ +--- +title: AudioContext.listener +slug: Web/API/AudioContext/listener +translation_of: Web/API/BaseAudioContext/listener +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的listener属性返回一个{{ domxref("AudioListener") }} 对象,可以用来实现3D音频空间化。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var myListener = audioCtx.listener;
+ +

返回值

+ +

An {{ domxref("AudioListener") }} object.

+ +

例子

+ +
+

Note: For a full audio spatialization example, see our panner-node demo.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+...
+
+var myListener = audioCtx.listener;
+
+ +
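Once you have the listener, you can position and orient it relative to your panner nodes. A minimal sketch reusing myListener from above, using the long-standing setPosition()/setOrientation() methods (newer code may instead set the positionX/positionY/positionZ AudioParams where supported):

// Place the listener at the origin...
myListener.setPosition(0, 0, 0);
// ...facing down the negative z axis, with "up" along the y axis
myListener.setOrientation(0, 0, -1, 0, 1, 0);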

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-listener', 'listener')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/onstatechange/index.html b/files/zh-cn/web/api/baseaudiocontext/onstatechange/index.html new file mode 100644 index 0000000000..ee9b3f21c0 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/onstatechange/index.html @@ -0,0 +1,101 @@ +--- +title: AudioContext.onstatechange +slug: Web/API/AudioContext/onstatechange +translation_of: Web/API/BaseAudioContext/onstatechange +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的onstatechange属性定义了一个事件处理器函数,触发{{Event("statechange")}}会被调用,也就是说audio context的状态发生变化时会执行。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+audioCtx.onstatechange = function() { ... };
+ +

例子

+ +

The following snippet is taken from our AudioContext states demo (see it running live). The onstatechange handler logs the current {{domxref("state")}} to the console every time it changes.

+ +
audioCtx.onstatechange = function() {
+  console.log(audioCtx.state);
+}
+
+ +
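A statechange event also fires when you suspend or resume the context yourself, which is a common pattern when a context starts out in the suspended state and is resumed on a user gesture. A minimal sketch, reusing audioCtx and assuming button refers to an existing button element:

button.addEventListener('click', function() {
  if (audioCtx.state === 'suspended') {
    // resume() returns a promise; statechange fires once the context is running again
    audioCtx.resume().then(function() {
      console.log('AudioContext resumed');
    });
  }
});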

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-onstatechange', 'onstatechange')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(43.0)}}{{CompatGeckoDesktop(40.0)}} {{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/samplerate/index.html b/files/zh-cn/web/api/baseaudiocontext/samplerate/index.html new file mode 100644 index 0000000000..b811702e26 --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/samplerate/index.html @@ -0,0 +1,112 @@ +--- +title: AudioContext.sampleRate +slug: Web/API/AudioContext/sampleRate +translation_of: Web/API/BaseAudioContext/sampleRate +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的sampleRate属性返回一个浮点数表示采样率(每秒采样数), 同一个AudioContext中的所有节点采样率相同,所以不支持采样率转换。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var mySampleRate = audioCtx.sampleRate;
+ +

返回值

+ +

A floating point number.

+ +

例子

+ +
+

Note: For a full Web Audio example implementation, see one of our Web Audio demos on the MDN GitHub repo, such as panner-node. Also try entering audioCtx.sampleRate in your browser console.

+
+ +
var AudioContext = window.AudioContext || window.webkitAudioContext;
+var audioCtx = new AudioContext();
+// Older webkit/blink browsers require a prefix
+
+...
+
+console.log(audioCtx.sampleRate);
+
+ +

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-sampleRate', 'sampleRate')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(10.0)}}{{property_prefix("webkit")}}{{CompatGeckoDesktop(25.0)}} {{CompatNo}}15.0{{property_prefix("webkit")}}
+ 22 (unprefixed)
6.0{{property_prefix("webkit")}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}26.01.2{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}33.0
+
+ +

另见

+ + diff --git a/files/zh-cn/web/api/baseaudiocontext/state/index.html b/files/zh-cn/web/api/baseaudiocontext/state/index.html new file mode 100644 index 0000000000..97876f5d3d --- /dev/null +++ b/files/zh-cn/web/api/baseaudiocontext/state/index.html @@ -0,0 +1,111 @@ +--- +title: AudioContext.state +slug: Web/API/AudioContext/state +translation_of: Web/API/BaseAudioContext/state +--- +

{{ APIRef("Web Audio API") }}

+ +
+

{{ domxref("AudioContext") }}的state属性是只读的,返回AudioContext的当前状态。

+
+ +

语法

+ +
var audioCtx = new AudioContext();
+var myState = audioCtx.state;
+ +

返回值

+ +

{{domxref("DOMString")}},可能的值如下:

+ + + +

例子

+ +

The following snippet is taken from our AudioContext states demo (see it running live). The {{domxref("AudioContext.onstatechange")}} handler logs the current state to the console every time it changes.

+ +
audioCtx.onstatechange = function() {
+  console.log(audioCtx.state);
+}
+
+ +

规范

+ + + + + + + + + + + + + + +
SpecificationStatusComment
{{SpecName('Web Audio API', '#widl-AudioContext-state', 'state')}}{{Spec2('Web Audio API')}} 
+ +

浏览器兼容性

+ +
{{CompatibilityTable}}
+ +
+ + + + + + + + + + + + + + + + + + + +
FeatureChromeFirefox (Gecko)Internet ExplorerOperaSafari (WebKit)
Basic support{{CompatChrome(43.0)}}{{CompatGeckoDesktop(40.0)}} {{CompatNo}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +
FeatureAndroidFirefox Mobile (Gecko)Firefox OSIE MobileOpera MobileSafari MobileChrome for Android
Basic support{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}{{CompatUnknown}}
+
+ +

另见

+ + -- cgit v1.2.3-54-g00ecf