path: root/files/zh-cn/web/api/audiocontext
author     Florian Merz <me@fiji-flo.de>  2021-02-11 12:56:40 +0100
committer  Florian Merz <me@fiji-flo.de>  2021-02-11 12:56:40 +0100
commit     310fd066e91f454b990372ffa30e803cc8120975 (patch)
tree       d5d900deb656a5da18e0b60d00f0db73f3a2e88e /files/zh-cn/web/api/audiocontext
parent     8260a606c143e6b55a467edf017a56bdcd6cba7e (diff)
unslug zh-cn: move
Diffstat (limited to 'files/zh-cn/web/api/audiocontext')
-rw-r--r--  files/zh-cn/web/api/audiocontext/createanalyser/index.html         154
-rw-r--r--  files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html     139
-rw-r--r--  files/zh-cn/web/api/audiocontext/createbuffer/index.html           181
-rw-r--r--  files/zh-cn/web/api/audiocontext/createbuffersource/index.html     150
-rw-r--r--  files/zh-cn/web/api/audiocontext/createchannelmerger/index.html    143
-rw-r--r--  files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html  138
-rw-r--r--  files/zh-cn/web/api/audiocontext/createconvolver/index.html        131
-rw-r--r--  files/zh-cn/web/api/audiocontext/createdelay/index.html            213
-rw-r--r--  files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html  199
-rw-r--r--  files/zh-cn/web/api/audiocontext/createwaveshaper/index.html       133
-rw-r--r--  files/zh-cn/web/api/audiocontext/currenttime/index.html            112
-rw-r--r--  files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html        223
-rw-r--r--  files/zh-cn/web/api/audiocontext/destination/index.html            114
-rw-r--r--  files/zh-cn/web/api/audiocontext/listener/index.html               112
-rw-r--r--  files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html     95
-rw-r--r--  files/zh-cn/web/api/audiocontext/onstatechange/index.html          101
-rw-r--r--  files/zh-cn/web/api/audiocontext/samplerate/index.html             112
-rw-r--r--  files/zh-cn/web/api/audiocontext/state/index.html                  111
18 files changed, 0 insertions, 2561 deletions
diff --git a/files/zh-cn/web/api/audiocontext/createanalyser/index.html b/files/zh-cn/web/api/audiocontext/createanalyser/index.html
deleted file mode 100644
index 2d00a8a100..0000000000
--- a/files/zh-cn/web/api/audiocontext/createanalyser/index.html
+++ /dev/null
@@ -1,154 +0,0 @@
----
-title: AudioContext.createAnalyser()
-slug: Web/API/AudioContext/createAnalyser
-translation_of: Web/API/BaseAudioContext/createAnalyser
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的<code>createAnalyser()</code>方法能创建一个{{ domxref("AnalyserNode") }},可以用来获取音频时间和频率数据,以及实现数据可视化。</p>
-</div>
-
-<div class="note">
-<p><strong>注意</strong>:关于该节点的更多信息,请查看{{domxref("AnalyserNode")}}</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var analyser = audioCtx.createAnalyser();</pre>
-
-<h3 id="Description" name="Description">返回值</h3>
-
-<p>{{domxref("AnalyserNode")}}对象</p>
-
-<h2 id="Examples" name="Examples">例子</h2>
-
-<p>下面的例子展示了使用 AudioContext 创建分析器节点的基本用法,然后用 requestAnimationFrame() 反复获取时域数据,并绘制出当前音频输入的“示波器风格”输出。更多完整例子请查看 <a href="https://mdn.github.io/voice-change-o-matic/">Voice-change-O-matic</a> demo(相关代码见 <a href="https://github.com/mdn/voice-change-o-matic/blob/gh-pages/scripts/app.js#L128-L205">app.js 的第 128–205 行</a>)。</p>
-
-<pre class="brush: js">var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-var analyser = audioCtx.createAnalyser();
-
- ...
-
-analyser.fftSize = 2048;
-var bufferLength = analyser.fftSize;
-var dataArray = new Uint8Array(bufferLength);
-analyser.getByteTimeDomainData(dataArray);
-
-// draw an oscilloscope of the current audio source
-
-function draw() {
-
-      drawVisual = requestAnimationFrame(draw);
-
-      analyser.getByteTimeDomainData(dataArray);
-
-      canvasCtx.fillStyle = 'rgb(200, 200, 200)';
-      canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
-
-      canvasCtx.lineWidth = 2;
-      canvasCtx.strokeStyle = 'rgb(0, 0, 0)';
-
-      canvasCtx.beginPath();
-
-      var sliceWidth = WIDTH * 1.0 / bufferLength;
-      var x = 0;
-
-      for(var i = 0; i &lt; bufferLength; i++) {
-
-        var v = dataArray[i] / 128.0;
-        var y = v * HEIGHT/2;
-
-        if(i === 0) {
-          canvasCtx.moveTo(x, y);
-        } else {
-          canvasCtx.lineTo(x, y);
-        }
-
-        x += sliceWidth;
-      }
-
-      canvasCtx.lineTo(canvas.width, canvas.height/2);
-      canvasCtx.stroke();
-    };
-
-    draw();</pre>
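-
-<p>上面的代码省略了画布和音频源的准备工作(例如 canvasCtx、WIDTH、HEIGHT 以及连接到 analyser 的声源)。下面是一段示意性的补全,其中的元素选择器和节点连接方式均为假设,仅供参考:</p>
-
-<pre class="brush: js">// 示意:准备画布上下文与尺寸(假设页面上已有一个 canvas 元素)
-var canvas = document.querySelector('canvas');
-var canvasCtx = canvas.getContext('2d');
-var WIDTH = canvas.width;
-var HEIGHT = canvas.height;
-
-// 示意:把一个 audio 元素作为声源接入分析器,再接到扬声器
-var audioElement = document.querySelector('audio');
-var sourceNode = audioCtx.createMediaElementSource(audioElement);
-sourceNode.connect(analyser);
-analyser.connect(audioCtx.destination);
-
-// 保存 requestAnimationFrame 的返回值,便于之后取消绘制
-var drawVisual;</pre>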
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createAnalyser-AnalyserNode', 'createAnalyser()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html b/files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html
deleted file mode 100644
index fa5884ad71..0000000000
--- a/files/zh-cn/web/api/audiocontext/createbiquadfilter/index.html
+++ /dev/null
@@ -1,139 +0,0 @@
----
-title: AudioContext.createBiquadFilter()
-slug: Web/API/AudioContext/createBiquadFilter
-tags:
- - API
- - EQ
- - Web Audio API
- - 参考
- - 方法
- - 滤波器
-translation_of: Web/API/BaseAudioContext/createBiquadFilter
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }} 的<code>createBiquadFilter()</code> 方法创建了一个  {{ domxref("BiquadFilterNode") }}, 它提供了一个可以指定多个不同的一般滤波器类型的双二阶滤波器。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var biquadFilter = audioCtx.createBiquadFilter();</pre>
-
-<h3 id="Description" name="Description">返回</h3>
-
-<p>一个 {{domxref("BiquadFilterNode")}}.</p>
-
-<h2 id="Example" name="Example">示例</h2>
-
-<p>这个例子展示了如何利用 AudioContext 创建一个双二阶滤波器节点(Biquad filter node)。想要查看完整的可运行示例,请查看我们的 <a href="http://mdn.github.io/voice-change-o-matic/">voice-change-o-matic</a> 演示(也可以查看<a href="https://github.com/mdn/voice-change-o-matic">源码</a>)。</p>
-
-<pre class="brush: js">var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-
-//set up the different audio nodes we will use for the app
-var analyser = audioCtx.createAnalyser();
-var distortion = audioCtx.createWaveShaper();
-var gainNode = audioCtx.createGain();
-var biquadFilter = audioCtx.createBiquadFilter();
-var convolver = audioCtx.createConvolver();
-
-// connect the nodes together
-
-source = audioCtx.createMediaStreamSource(stream);
-source.connect(analyser);
-analyser.connect(distortion);
-distortion.connect(biquadFilter);
-biquadFilter.connect(convolver);
-convolver.connect(gainNode);
-gainNode.connect(audioCtx.destination);
-
-// Manipulate the Biquad filter
-
-biquadFilter.type = "lowshelf";
-biquadFilter.frequency.value = 1000;
-biquadFilter.gain.value = 25;</pre>
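-
-<p>作为补充,下面的示意代码沿用上面的 biquadFilter 变量,展示了如何在运行时切换滤波器类型,并利用 AudioParam 的调度方法平滑地改变截止频率;具体数值仅作演示:</p>
-
-<pre class="brush: js">// 示意:切换为高通滤波器
-biquadFilter.type = "highpass";
-biquadFilter.Q.value = 1;
-
-// 在 1 秒内把截止频率平滑地推到 2000 Hz
-biquadFilter.frequency.setValueAtTime(biquadFilter.frequency.value, audioCtx.currentTime);
-biquadFilter.frequency.linearRampToValueAtTime(2000, audioCtx.currentTime + 1);</pre>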
-
-<h2 id="规格">规格</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createBiquadFilter-BiquadFilterNode', 'createBiquadFilter()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Edge</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0 {{property_prefix("webkit")}}<br>
- 22</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Edge</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="sect1"> </h2>
-
-<h2 id="相关">相关</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createbuffer/index.html b/files/zh-cn/web/api/audiocontext/createbuffer/index.html
deleted file mode 100644
index 2d29213737..0000000000
--- a/files/zh-cn/web/api/audiocontext/createbuffer/index.html
+++ /dev/null
@@ -1,181 +0,0 @@
----
-title: AudioContext.createBuffer()
-slug: Web/API/AudioContext/createBuffer
-tags:
- - 创建音频片段
- - 接口
- - 方法
- - 音频环境
-translation_of: Web/API/BaseAudioContext/createBuffer
----
-<p>音频环境 {{ domxref("AudioContext") }} 接口的 <code>createBuffer()</code> 方法用于新建一个空白的 {{ domxref("AudioBuffer") }} 对象,以便用于填充数据,之后通过 {{ domxref("AudioBufferSourceNode") }} 播放。</p>
-
-<p>更多关于音频片段(Audio Buffer)的细节,请参考{{ domxref("AudioBuffer") }}页面。</p>
-
-<div class="note">
-<p><strong>注意:</strong> <code>createBuffer()</code> 曾被用于接收压缩后的音频数据并返回解码后的音频,但这项功能已经从规范中移除,原因是解码工作都在主线程中同步完成,导致 <code>createBuffer()</code> 阻塞了其他代码的执行。异步方法 <code>decodeAudioData()</code> 能够完成相同的工作:传入一个压缩过的音频(如 MP3 格式的文件),并直接返回一个可以通过 {{ domxref("AudioBufferSourceNode") }} 播放的 {{ domxref("AudioBuffer") }}。因此播放诸如 MP3 等格式的压缩音频时,你应当使用 <code>decodeAudioData()</code> 方法。</p>
-</div>
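-
-<p>下面是一段示意代码,演示上述注意中提到的 <code>decodeAudioData()</code> 用法;其中的文件名 'song.mp3' 为假设,这里采用基于 promise 的写法:</p>
-
-<pre class="brush: js">// 示意:用 fetch + decodeAudioData 解码压缩音频并播放(文件名为假设)
-var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-
-fetch('song.mp3')
-  .then(function(response) { return response.arrayBuffer(); })
-  .then(function(arrayBuffer) { return audioCtx.decodeAudioData(arrayBuffer); })
-  .then(function(audioBuffer) {
-    var source = audioCtx.createBufferSource();
-    source.buffer = audioBuffer;
-    source.connect(audioCtx.destination);
-    source.start();
-  });</pre>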
-
-<h2 id="语法">语法</h2>
-
-<pre>AudioContext.createBuffer(Number numOfChannels, Number length, Number sampleRate);</pre>
-
-<h3 id="参数">参数</h3>
-
-<div class="note">
-<p><strong>注意:</strong>如果想深入了解 audio buffers 是如何工作的、这些参数的具体含义,请阅读这篇简短的指南: <a href="/en-US/docs/Web/API/Web_Audio_API/Basic_concepts_behind_Web_Audio_API#Audio_buffers.3A_frames.2C_samples_and_channels">Audio buffers: frames, samples and channels</a>(英)。</p>
-</div>
-
-<dl>
- <dt>numOfChannels</dt>
- <dd>一个整数,定义了 buffer 中包含的声道数量。<br>
- 标准的实现必须支持至少 32 个声道。</dd>
- <dt>length</dt>
- <dd>一个代表 buffer 中的样本帧数的整数。</dd>
- <dt>sampleRate</dt>
- <dd>线性音频样本的采样率,即每一秒包含的关键帧的个数。实现过程中必须支持 22050~96000的采样率。</dd>
-</dl>
-
-<h3 id="返回值">返回值</h3>
-
-<p>一个 {{domxref("AudioBuffer")}}。</p>
-
-<h2 id="示例">示例</h2>
-
-<p>首先,我们将从几个浅显易懂的示例入手,来解释如何使用这些参数:</p>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var buffer = audioCtx.createBuffer(2, 22050, 44100);</pre>
-
-<p>如果你这样调用,你将会得到一个立体声(两个声道)的音频片段(Buffer),当它在一个采样率为 44100 赫兹(这是目前大部分声卡的处理采样率)的音频环境({{ domxref("AudioContext") }})中播放的时候,会持续 0.5 秒:22050 帧 / 44100 赫兹 = 0.5 秒。</p>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var buffer = audioCtx.createBuffer(1, 22050, 22050);</pre>
-
-<p>如果你这样调用,你将会得到一个单声道的音频片段(Buffer),当它在一个采样率为 44100 赫兹的音频环境({{ domxref("AudioContext") }})中播放的时候,将会被自动按照 44100 赫兹<em>重采样</em>(因此也会转化为 44100 帧的片段),并持续 1 秒:44100 帧 / 44100 赫兹 = 1 秒。</p>
-
-<div class="note">
-<p><strong>注意:</strong>音频重采样与图片的缩放非常类似:比如你有一个 16 x 16 的图像,但是你想把它填充到一个 32 x 32 大小的区域,你就要对它进行缩放(重采样)。得到的结果品质较低(图像会模糊或者有锯齿状的边缘,这取决于缩放采用的算法),但它确实可行,而且占用的空间比相同尺寸的原始图像要小。重采样的音频道理相同:你会节省一些空间,但代价是无法还原出高频率的声音(高音区)。</p>
-</div>
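-
-<p>可以用 {{domxref("AudioBuffer")}} 自带的 length、sampleRate 和 duration 属性来验证上面两个例子中的换算(示意代码,仅作演示):</p>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-
-var stereoBuffer = audioCtx.createBuffer(2, 22050, 44100);
-console.log(stereoBuffer.duration); // 0.5(秒):22050 帧 / 44100 赫兹
-
-var monoBuffer = audioCtx.createBuffer(1, 22050, 22050);
-console.log(monoBuffer.duration);   // 1(秒):22050 帧 / 22050 赫兹</pre>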
-
-<p>现在让我们来看一个更加复杂的示例,我们将创建一个时长2秒的音频片段,并用白噪声填充它,之后通过一个 音频片段源节点({{ domxref("AudioBufferSourceNode") }}) 播放。代码中的注释应该能充分解释发生了什么。你可以 <a href="http://mdn.github.io/audio-buffer/">在线演示</a> ,或者 <a href="https://github.com/mdn/audio-buffer">查看源代码</a> 。</p>
-
-<pre class="brush: js;highlight[13]">var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-var button = document.querySelector('button');
-var pre = document.querySelector('pre');
-var myScript = document.querySelector('script');
-
-pre.innerHTML = myScript.innerHTML;
-
-// 立体声
-var channels = 2;
-// 创建一个 采样率与音频环境(AudioContext)相同的 时长2秒的 音频片段。
-var frameCount = audioCtx.sampleRate * 2.0;
-
-var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);
-
-button.onclick = function() {
- // 使用白噪声填充;
- // 就是 -1.0 到 1.0 之间的随机数
- for (var channel = 0; channel &lt; channels; channel++) {
- // 这允许我们读取实际音频片段(AudioBuffer)中包含的数据
- var nowBuffering = myArrayBuffer.getChannelData(channel);
- for (var i = 0; i &lt; frameCount; i++) {
- // Math.random() is in [0; 1.0]
- // audio needs to be in [-1.0; 1.0]
- nowBuffering[i] = Math.random() * 2 - 1;
- }
- }
-
- // 获取一个 音频片段源节点(AudioBufferSourceNode)。
- // 当我们想播放音频片段时,我们会用到这个源节点。
- var source = audioCtx.createBufferSource();
- // 把刚才生成的片段加入到 音频片段源节点(AudioBufferSourceNode)。
- source.buffer = myArrayBuffer;
- // 把 音频片段源节点(AudioBufferSourceNode) 连接到
- // 音频环境(AudioContext) 的终节点,这样我们就能听到声音了。
- source.connect(audioCtx.destination);
- // 开始播放声源
- source.start();
-}</pre>
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">规范</th>
- <th scope="col">现状</th>
- <th scope="col">备注</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createBuffer-AudioBuffer-unsigned-long-numberOfChannels-unsigned-long-length-float-sampleRate', 'createBuffer()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0 {{property_prefix("webkit")}}<br>
- 22</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="相关链接">相关链接</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">使用网络音频接口(英文)</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createbuffersource/index.html b/files/zh-cn/web/api/audiocontext/createbuffersource/index.html
deleted file mode 100644
index 5244513312..0000000000
--- a/files/zh-cn/web/api/audiocontext/createbuffersource/index.html
+++ /dev/null
@@ -1,150 +0,0 @@
----
-title: AudioContext.createBufferSource()
-slug: Web/API/AudioContext/createBufferSource
-tags:
- - API
- - 音源
- - 音频源
- - 音频节点
-translation_of: Web/API/BaseAudioContext/createBufferSource
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p><code>createBufferSource()</code> 方法用于创建一个新的 {{ domxref("AudioBufferSourceNode") }},该节点可以通过 {{ domxref("AudioBuffer") }} 对象来播放音频数据。{{ domxref("AudioBuffer") }} 对象可以通过 {{domxref("AudioContext.createBuffer")}} 来创建,或者在用 {{domxref("AudioContext.decodeAudioData")}} 成功解码音轨后获取。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var source = audioCtx.createBufferSource();</pre>
-
-<h2 id="返回">返回</h2>
-
-<p>一个{{domxref("AudioBufferSourceNode")}}对象.</p>
-
-<h2 id="例子">例子</h2>
-
-<p>在这个例子中,我们将会创建一个 2 秒的缓冲区,并用白噪声填充它,然后通过 {{ domxref("AudioBufferSourceNode") }} 来播放它。</p>
-
-<div class="note">
-<p><strong>Note</strong>: You can also <a href="http://mdn.github.io/audio-buffer/">run the code live</a>, or <a href="https://github.com/mdn/audio-buffer">view the source</a>.</p>
-</div>
-
-<pre class="brush: js;highlight[31]">var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-var button = document.querySelector('button');
-var pre = document.querySelector('pre');
-var myScript = document.querySelector('script');
-
-pre.innerHTML = myScript.innerHTML;
-
-// Stereo
-var channels = 2;
-// Create an empty two second stereo buffer at the
-// sample rate of the AudioContext
-var frameCount = audioCtx.sampleRate * 2.0;
-
-var myArrayBuffer = audioCtx.createBuffer(2, frameCount, audioCtx.sampleRate);
-
-button.onclick = function() {
- // Fill the buffer with white noise;
- //just random values between -1.0 and 1.0
- for (var channel = 0; channel &lt; channels; channel++) {
- // This gives us the actual ArrayBuffer that contains the data
- var nowBuffering = myArrayBuffer.getChannelData(channel);
- for (var i = 0; i &lt; frameCount; i++) {
- // Math.random() is in [0; 1.0]
- // audio needs to be in [-1.0; 1.0]
- nowBuffering[i] = Math.random() * 2 - 1;
- }
- }
-
- // Get an AudioBufferSourceNode.
- // This is the AudioNode to use when we want to play an AudioBuffer
- var source = audioCtx.createBufferSource();
- // set the buffer in the AudioBufferSourceNode
- source.buffer = myArrayBuffer;
- // connect the AudioBufferSourceNode to the
- // destination so we can hear the sound
- source.connect(audioCtx.destination);
- // start the source playing
- source.start();
-}</pre>
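-
-<p>需要注意,{{domxref("AudioBufferSourceNode")}} 是一次性的:调用过 <code>stop()</code> 之后不能再次 <code>start()</code>,每次播放都应重新创建一个源节点。下面的示意沿用上例中的变量(需在同一作用域内使用),演示循环播放与定时停止:</p>
-
-<pre class="brush: js">// 示意:循环播放白噪声,并在 3 秒后停止
-source.loop = true;
-source.start();
-source.stop(audioCtx.currentTime + 3);</pre>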
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createBufferSource-AudioBufferSourceNode', 'createBufferSource()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器支持">浏览器支持</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="See_also">See also</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createchannelmerger/index.html b/files/zh-cn/web/api/audiocontext/createchannelmerger/index.html
deleted file mode 100644
index 281dcddfe7..0000000000
--- a/files/zh-cn/web/api/audiocontext/createchannelmerger/index.html
+++ /dev/null
@@ -1,143 +0,0 @@
----
-title: AudioContext.createChannelMerger()
-slug: Web/API/AudioContext/createChannelMerger
-tags:
- - API
- - Audio
- - AudioContext
- - Audio_Chinese
-translation_of: Web/API/BaseAudioContext/createChannelMerger
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }} 的 <code>createChannelMerger()</code> 方法会创建一个 ChannelMergerNode,后者可以把多个音频流的通道整合到一个音频流。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var merger = audioCtx.createChannelMerger(2);</pre>
-
-<h3 id="参数">参数</h3>
-
-<dl>
- <dt>numberOfInputs</dt>
- <dd>输入音频流的数量,输出流将包含这个数量的声道;如果不指定该参数,默认值为 6。</dd>
-</dl>
-
-<h3 id="返回值">返回值</h3>
-
-<p>一个 {{domxref("ChannelMergerNode")}}.</p>
-
-<h2 id="(举个)栗(例)子">(举个)栗(例)子</h2>
-
-<p>下面的例子展示了如何分离一段立体声音轨(比如一段音乐),并对左右声道做不同的处理。使用时,需要用到 AudioNode.connect(AudioNode) 方法的第二个和第三个参数,它们分别指定连接来源的输出(声道)索引和连接目标的输入(声道)索引。</p>
-
-<pre class="brush: js;highlight[7,16,17,24]">var ac = new AudioContext();
-ac.decodeAudioData(someStereoBuffer, function(data) {
- var source = ac.createBufferSource();
- source.buffer = data;
- var splitter = ac.createChannelSplitter(2);
- source.connect(splitter);
- var merger = ac.createChannelMerger(2);
-
- // Reduce the volume of the left channel only
- var gainNode = ac.createGain();
- gainNode.gain.value = 0.5;
- splitter.connect(gainNode, 0);
-
- // Connect the splitter back to the second input of the merger: we
- // effectively swap the channels, here, reversing the stereo image.
- gainNode.connect(merger, 0, 1);
- splitter.connect(merger, 1, 0);
-
- var dest = ac.createMediaStreamDestination();
-
- // Because we have used a ChannelMergerNode, we now have a stereo
- // MediaStream we can use to pipe the Web Audio graph to WebRTC,
- // MediaRecorder, etc.
- merger.connect(dest);
-});</pre>
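-
-<p>上面得到的 dest 是一个 MediaStreamAudioDestinationNode,它的 stream 属性就是可供 WebRTC 或 MediaRecorder 使用的 MediaStream。下面是一段示意代码,演示 MediaRecorder 的基本用法;它需要写在上面的回调内部(dest 和 source 在作用域内)才能工作:</p>
-
-<pre class="brush: js">// 示意:用 MediaRecorder 录制合并后的立体声流
-var recorder = new MediaRecorder(dest.stream);
-recorder.ondataavailable = function(evt) {
-  console.log('收到一段录音数据,大小:', evt.data.size);
-};
-recorder.start();
-
-// 别忘了启动声源,否则流里不会有声音
-source.start();</pre>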
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createChannelMerger-ChannelMergerNode-unsigned-long-numberOfInputs', 'createChannelMerger()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Edge</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Edge</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="相关页面">相关页面</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html b/files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html
deleted file mode 100644
index f46f5be2c5..0000000000
--- a/files/zh-cn/web/api/audiocontext/createchannelsplitter/index.html
+++ /dev/null
@@ -1,138 +0,0 @@
----
-title: AudioContext.createChannelSplitter()
-slug: Web/API/AudioContext/createChannelSplitter
-translation_of: Web/API/BaseAudioContext/createChannelSplitter
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>The <code>createChannelSplitter()</code> method of the {{ domxref("AudioContext") }} Interface is used to create a {{domxref("ChannelSplitterNode")}}, which is used to access the individual channels of an audio stream and process them separately.</p>
-</div>
-
-<h2 id="Syntax">Syntax</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var splitter = audioCtx.createChannelSplitter(2);</pre>
-
-<h3 id="参数">参数</h3>
-
-<dl>
- <dt>numberOfOutputs</dt>
- <dd>你期望将输入音频分割成的声道数目;当不传入参数时,默认为 6。</dd>
-</dl>
-
-<h3 id="Returns">Returns</h3>
-
-<p>一个 {{domxref("ChannelSplitterNode")}}.</p>
-
-<h2 id="Example">Example</h2>
-
-<p>下面这个简单的例子展示了怎样分离一段双声道音轨(比如一段音乐),以及如何对左右声道做不同的处理。要做到这一点,你需要用到 {{domxref("AudioNode.connect(AudioNode)") }} 方法的第二个和第三个参数,它们分别指定连接来源的输出(声道)索引和连接目标的输入(声道)索引。</p>
-
-<pre class="brush: js;highlight[5,12,17]">var ac = new AudioContext();
-ac.decodeAudioData(someStereoBuffer, function(data) {
- var source = ac.createBufferSource();
- source.buffer = data;
- var splitter = ac.createChannelSplitter(2);
- source.connect(splitter);
- var merger = ac.createChannelMerger(2);
-
- // Reduce the volume of the left channel only
- var gainNode = ac.createGain();
- gainNode.gain.value = 0.5;
- splitter.connect(gainNode, 0);
-
- // Connect the splitter back to the second input of the merger: we
- // effectively swap the channels, here, reversing the stereo image.
- gainNode.connect(merger, 0, 1);
- splitter.connect(merger, 1, 0);
-
- var dest = ac.createMediaStreamDestination();
-
- // Because we have used a ChannelMergerNode, we now have a stereo
- // MediaStream we can use to pipe the Web Audio graph to WebRTC,
- // MediaRecorder, etc.
- merger.connect(dest);
-});</pre>
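-
-<p>补充一点:ChannelSplitterNode 的每个输出都是单声道,可以通过 AudioNode 的 numberOfOutputs / numberOfInputs 属性确认上面节点图的结构(示意,沿用上例中的变量):</p>
-
-<pre class="brush: js">// 示意:确认分离与合并节点的输入输出数量
-console.log(splitter.numberOfOutputs); // 2
-console.log(merger.numberOfInputs);    // 2</pre>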
-
-<h2 id="规格">规格</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">规格</th>
- <th scope="col">状态</th>
- <th scope="col">注释</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createChannelSplitter-ChannelSplitterNode-unsigned-long-numberOfOutputs', 'createChannelSplitter()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Edge</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Edge</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createconvolver/index.html b/files/zh-cn/web/api/audiocontext/createconvolver/index.html
deleted file mode 100644
index 2cbe395edc..0000000000
--- a/files/zh-cn/web/api/audiocontext/createconvolver/index.html
+++ /dev/null
@@ -1,131 +0,0 @@
----
-title: AudioContext.createConvolver()
-slug: Web/API/AudioContext/createConvolver
-translation_of: Web/API/BaseAudioContext/createConvolver
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的方法<code>createConvolver()能创建一个</code>{{ domxref("ConvolverNode") }},通常用来对你的音频应用混响效果。在 <a href="http://webaudio.github.io/web-audio-api/#background-3">Convolution规范定义</a> 中查看更多信息。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var convolver = audioCtx.createConvolver();</pre>
-
-<h3 id="Description" name="Description">返回值</h3>
-
-<p>{{domxref("ConvolverNode")}}对象。</p>
-
-<h2 id="Examples" name="Examples">例子</h2>
-
-<p>下面的例子展示了使用 AudioContext 创建混响器节点的基本方法。其基本思路是:先创建一个包含声音样本的 AudioBuffer 作为混响环境(称之为<em>脉冲响应</em>),再把它应用到混响器节点上。下面的例子使用了一段简短的音乐厅人群采样,因此混响效果非常深邃、富有回声。</p>
-
-<p>更多完整例子请查看<a href="http://mdn.github.io/voice-change-o-matic/">Voice-change-O-matic demo</a> (中<a href="https://github.com/mdn/voice-change-o-matic/blob/gh-pages/scripts/app.js">app.js的</a>代码)。</p>
-
-<pre class="brush: js">var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-var convolver = audioCtx.createConvolver();
-
- ...
-
-// grab audio track via XHR for convolver node
-
-var soundSource, concertHallBuffer;
-
-ajaxRequest = new XMLHttpRequest();
-ajaxRequest.open('GET', 'concert-crowd.ogg', true);
-ajaxRequest.responseType = 'arraybuffer';
-
-ajaxRequest.onload = function() {
-  var audioData = ajaxRequest.response;
-  audioCtx.decodeAudioData(audioData, function(buffer) {
-      concertHallBuffer = buffer;
-      soundSource = audioCtx.createBufferSource();
-      soundSource.buffer = concertHallBuffer;
-    }, function(e){ console.error("Error with decoding audio data " + e.err); });
-}
-
-ajaxRequest.send();
-
- ...
-
-convolver.buffer = concertHallBuffer;</pre>
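-
-<p>上面的代码只是把脉冲响应填入了混响器;要真正听到混响效果,还需要把声源接到混响器,再接到输出。下面是一段示意(需要在 soundSource 于解码回调中创建完成之后执行):</p>
-
-<pre class="brush: js">// 示意:把声源经过混响器接到扬声器并播放
-soundSource.connect(convolver);
-convolver.connect(audioCtx.destination);
-soundSource.start();</pre>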
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createConvolver-ConvolverNode', 'createConvolver()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容">浏览器兼容</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createdelay/index.html b/files/zh-cn/web/api/audiocontext/createdelay/index.html
deleted file mode 100644
index b8e502758d..0000000000
--- a/files/zh-cn/web/api/audiocontext/createdelay/index.html
+++ /dev/null
@@ -1,213 +0,0 @@
----
-title: AudioContext.createDelay()
-slug: Web/API/AudioContext/createDelay
-translation_of: Web/API/BaseAudioContext/createDelay
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p><code>createDelay()</code> 是 {{ domxref("AudioContext") }} 的一个方法,作用是将输入的音频信号延迟一段时间再输出(比如可以实现对着话筒说一句话,几秒后这句话才从音响里播放出来)。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var synthDelay = audioCtx.createDelay(<em>maxDelayTime</em>);</pre>
-
-<h3 id="参数">参数</h3>
-
-<dl>
- <dt><em>maxDelayTime</em></dt>
- <dd>设置最大允许延迟的时间,以“秒”为单位</dd>
-</dl>
-
-<h3 id="返回">返回</h3>
-
-<p>A {{domxref("DelayNode")}}. The default {{domxref("DelayNode.delayTime")}} if no parameter is passed to <code>createDelay()</code> is 0 seconds.</p>
-
-<p>以上是原文,大意是返回延时时间,没有设置时默认是0</p>
-
-<p> </p>
-
-<h2 id="示例">示例</h2>
-
-<p>首先是一个简洁的中文示例:在这个例子中,话筒接收到的声音会延迟 3 秒后从音响中播放出来。</p>
-
-<pre class="brush: js">window.AudioContext = window.AudioContext || window.webkitAudioContext || window.mozAudioContext || window.msAudioContext;
-
-var audioContext;
-var synthDelay;
-
-try { // 创建音频环境和延迟节点(最大延迟 5 秒)
-  audioContext = new window.AudioContext();
-  synthDelay = audioContext.createDelay(5.0);
-} catch (e) {
-  alert("你的浏览器不支持 Web Audio API");
-}
-
-var error = function (error) { alert("有错误"); };
-
-// 以下是获取麦克风(兼容不同前缀的 getUserMedia)
-if (navigator.getUserMedia) { // 标准 api
-  navigator.getUserMedia({ "audio": true }, function (stream) {
-    micto(stream); // 具体工作
-  }, error);
-} else if (navigator.webkitGetUserMedia) { // webkit api
-  navigator.webkitGetUserMedia({ audio: true, video: false }, function (stream) {
-    micto(stream);
-  }, error);
-} else if (navigator.mozGetUserMedia) { // 火狐 api
-  navigator.mozGetUserMedia({ "audio": true }, function (stream) {
-    micto(stream);
-  }, error);
-} else if (navigator.msGetUserMedia) { // ie api
-  navigator.msGetUserMedia({ "audio": true }, function (stream) {
-    micto(stream);
-  }, error);
-} else {
-  alert("您的浏览器不支持这个 api");
-}
-
-// 把麦克风输入经过延迟节点接到扬声器
-function micto(stream) {
-  synthDelay.delayTime.value = 3.0; // 延迟 3 秒
-
-  var source = audioContext.createMediaStreamSource(stream);
-
-  source.connect(synthDelay);
-
-  synthDelay.connect(audioContext.destination);
-}</pre>
-
-<p>以下是英文版示例:</p>
-
-<p>We have created a simple example that allows you to play three different samples on a constant loop — see <a href="http://chrisdavidmills.github.io/create-delay/">create-delay</a> (you can also <a href="https://github.com/chrisdavidmills/create-delay">view the source code</a>). If you just press the play buttons, the loops will start immediately; if you slide the sliders up to the right, then press the play buttons, a delay will be introduced, so the looping sounds don't start playing for a short amount of time.</p>
-
-<pre class="brush: js;highlight[4,15,16,21,22]">var AudioContext = window.AudioContext || window.webkitAudioContext;
-var audioCtx = new AudioContext();
-
-var synthDelay = audioCtx.createDelay(5.0);
-
- ...
-
-var synthSource;
-
-playSynth.onclick = function() {
- synthSource = audioCtx.createBufferSource();
- synthSource.buffer = buffers[2];
- synthSource.loop = true;
- synthSource.start();
- synthSource.connect(synthDelay);
- synthDelay.connect(destination);
- this.setAttribute('disabled', 'disabled');
-}
-
-stopSynth.onclick = function() {
- synthSource.disconnect(synthDelay);
- synthDelay.disconnect(destination);
- synthSource.stop();
- playSynth.removeAttribute('disabled');
-}
-
-...
-
-var delay1;
-rangeSynth.oninput = function() {
-delay1 = rangeSynth.value;
-synthDelay.delayTime.value = delay1;
-}
-</pre>
-
-<h2 id="Specifications">Specifications</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createDelay-DelayNode-double-maxDelayTime', 'createDelay()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="Browser_compatibility">Browser compatibility</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Edge</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Edge</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="See_also">See also</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html b/files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html
deleted file mode 100644
index 7e505bc06a..0000000000
--- a/files/zh-cn/web/api/audiocontext/createscriptprocessor/index.html
+++ /dev/null
@@ -1,199 +0,0 @@
----
-title: AudioContext.createScriptProcessor()
-slug: Web/API/AudioContext/createScriptProcessor
-translation_of: Web/API/BaseAudioContext/createScriptProcessor
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }} 接口的<code>createScriptProcessor()</code> 方法创建一个{{domxref("ScriptProcessorNode")}} 用于通过JavaScript直接处理音频.</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-myScriptProcessor = audioCtx.createScriptProcessor(<code>bufferSize</code>, <code>numberOfInputChannels</code>, <code>numberOfOutputChannels</code>);</pre>
-
-<h3 id="Parameters" name="Parameters">参数</h3>
-
-<dl>
- <dt><code>bufferSize</code></dt>
- <dd>缓冲区大小,以样本帧为单位。具体来讲,缓冲区大小必须是下面这些值当中的某一个:256、512、1024、2048、4096、8192、16384。如果不传,或者参数为 0,则取当前环境最合适的缓冲区大小,其值为 2 的幂次方的一个常数,在该节点的整个生命周期中保持不变。</dd>
- <dd>该取值控制着 <code>audioprocess</code> 事件被派发的频率,以及每次调用时要处理的样本帧数量。较低的 bufferSize 可以获得更低(更好)的延迟;较高的 bufferSize 则有助于避免音频的中断和故障。推荐作者不要指定具体的缓冲区大小,让系统自行选择一个合适的值来平衡延迟和音频质量。</dd>
- <dt><code>numberOfInputChannels</code></dt>
- <dd>值为整数,用于指定输入node的声道的数量,默认值是2,最高能取32.</dd>
- <dt><code>numberOfOutputChannels</code></dt>
- <dd>值为整数,用于指定输出node的声道的数量,默认值是2,最高能取32.</dd>
-</dl>
-
-<div class="warning">
-<p><span style="font-size: 14px;"><strong>重要</strong></span>: Webkit (version 31)要求调用这个方法的时候必须传入一个有效的bufferSize .</p>
-</div>
-
-<div class="note">
-<p><span style="font-size: 14px;"><strong>注意</strong></span>: <code>numberOfInputChannels<font face="Open Sans, Arial, sans-serif">和</font></code><code>numberOfOutputChannels的值不能同时为0,二者同时为0是无效的</code></p>
-</div>
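-
-<p>补充说明:<code>audioprocess</code> 事件的触发间隔约等于 bufferSize / sampleRate 秒。下面的示意计算可以帮助理解缓冲区大小与延迟的关系(数值仅作演示):</p>
-
-<pre class="brush: js">// 示意:bufferSize 为 4096、采样率为 44100 Hz 时
-var latencySeconds = 4096 / 44100;
-console.log(latencySeconds); // ≈ 0.093,即每次回调大约间隔 93 毫秒</pre>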
-
-<h3 id="Description" name="Description">返回</h3>
-
-<p>A {{domxref("ScriptProcessorNode")}}.</p>
-
-<h2 id="示例">示例</h2>
-
-<p><code><font face="Open Sans, Arial, sans-serif">下面的例子展示了一个</font>ScriptProcessorNode的基本用法,数据源取自</code> {{ domxref("AudioContext.decodeAudioData") }}, 给每一个音频样本加一点白噪声,然后通过{{domxref("AudioDestinationNode")}}播放(其实这个就是系统的扬声器)。 对于每一个声道和样本帧,在把结果当成输出样本之前,<code>scriptNode.onaudioprocess方法<font face="Open Sans, Arial, sans-serif">关联</font></code><code>audioProcessingEvent</code> ,并用它来遍历每输入流的每一个声道,和每一个声道中的每一个样本,并添加一点白噪声。</p>
-
-<div class="note">
-<p><span style="font-size: 14px;"><strong>注意</strong></span>: 完整的示例参照 <a href="https://mdn.github.io/webaudio-examples/script-processor-node/">script-processor-node</a> github (查看源码 <a href="https://github.com/mdn/webaudio-examples/blob/master/script-processor-node/index.html">source code</a>.)</p>
-</div>
-
-<pre class="brush: js">var myScript = document.querySelector('script');
-var myPre = document.querySelector('pre');
-var playButton = document.querySelector('button');
-
-// Create AudioContext and buffer source
-var audioCtx = new AudioContext();
-source = audioCtx.createBufferSource();
-
-// Create a ScriptProcessorNode with a bufferSize of 4096 and a single input and output channel
-var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
-console.log(scriptNode.bufferSize);
-
-// load in an audio track via XHR and decodeAudioData
-
-function getData() {
-  request = new XMLHttpRequest();
-  request.open('GET', 'viper.ogg', true);
-  request.responseType = 'arraybuffer';
-  request.onload = function() {
-    var audioData = request.response;
-
-    audioCtx.decodeAudioData(audioData, function(buffer) {
-    myBuffer = buffer;
-    source.buffer = myBuffer;
-  },
-    function(e){ console.error("Error with decoding audio data " + e.err); });
-  }
-  request.send();
-}
-
-// Give the node a function to process audio events
-scriptNode.onaudioprocess = function(audioProcessingEvent) {
- // The input buffer is the song we loaded earlier
-  var inputBuffer = audioProcessingEvent.inputBuffer;
-
-  // The output buffer contains the samples that will be modified and played
-  var outputBuffer = audioProcessingEvent.outputBuffer;
-
-  // Loop through the output channels (in this case there is only one)
-  for (var channel = 0; channel &lt; outputBuffer.numberOfChannels; channel++) {
-    var inputData = inputBuffer.getChannelData(channel);
-    var outputData = outputBuffer.getChannelData(channel);
-
-    // Loop through the 4096 samples
-    for (var sample = 0; sample &lt; inputBuffer.length; sample++) {
-    // make output equal to the same as the input
-      outputData[sample] = inputData[sample];
-
-      // add noise to each output sample
-      outputData[sample] += ((Math.random() * 2) - 1) * 0.2;
-    }
-  }
-}
-
-getData();
-
-// wire up play button
-playButton.onclick = function() {
- source.connect(scriptNode);
-  scriptNode.connect(audioCtx.destination);
-  source.start();
-}
-
-// When the buffer source stops playing, disconnect everything
-source.onended = function() {
- source.disconnect(scriptNode);
-  scriptNode.disconnect(audioCtx.destination);
-}
-</pre>
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels', 'createScriptProcessor')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td></td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Edge</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Edge</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="See_also">See also</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/createwaveshaper/index.html b/files/zh-cn/web/api/audiocontext/createwaveshaper/index.html
deleted file mode 100644
index 7aef8d5688..0000000000
--- a/files/zh-cn/web/api/audiocontext/createwaveshaper/index.html
+++ /dev/null
@@ -1,133 +0,0 @@
----
-title: AudioContext.createWaveShaper()
-slug: Web/API/AudioContext/createWaveShaper
-translation_of: Web/API/BaseAudioContext/createWaveShaper
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<p>{{ domxref("AudioContext") }} 接口的createWaveShaper()方法创建了 表示非线性失真的{{ domxref("WaveShaperNode") }}。该节点通常被用来给音频添加失真效果</p>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var distortion = audioCtx.createWaveShaper();</pre>
-
-<h3 id="Description" name="Description">返回</h3>
-
-<p>A {{domxref("WaveShaperNode")}}.</p>
-
-<h2 id="Example" name="Example">例子</h2>
-
-<p>下面的例子展示了使用 AudioContext 创建波形整形器节点的基本用法。有关实际应用的示例和信息,请查看我们的 <a href="http://mdn.github.io/voice-change-o-matic/">Voice-change-O-matic demo</a>(相关代码请参阅 <a href="https://github.com/mdn/voice-change-o-matic/blob/gh-pages/scripts/app.js">app.js</a>)。</p>
-
-<div class="note">
-<p><strong>注</strong>:实现失真曲线并不是简单的事情,你可能需要到处找资料来找到这样的算法。我们在<a href="http://stackoverflow.com/questions/22312841/waveshaper-node-in-webaudio-how-to-emulate-distortion">Stack Overflow</a>上找到了以下的失真曲线代码</p>
-</div>
-
-<pre class="brush: js">var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-var distortion = audioCtx.createWaveShaper();
-
- ...
-
-function makeDistortionCurve(amount) {
-  var k = typeof amount === 'number' ? amount : 50,
-    n_samples = 44100,
-    curve = new Float32Array(n_samples),
-    deg = Math.PI / 180,
-    i = 0,
-    x;
-  for ( ; i &lt; n_samples; ++i ) {
-    x = i * 2 / n_samples - 1;
-    curve[i] = ( 3 + k ) * x * 20 * deg / ( Math.PI + k * Math.abs(x) );
-  }
-  return curve;
-};
-
- ...
-
-distortion.curve = makeDistortionCurve(400);
-distortion.oversample = '4x';</pre>
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-createWaveShaper-WaveShaperNode', 'createWaveShaper()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Edge</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Edge</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="See_also">See also</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/currenttime/index.html b/files/zh-cn/web/api/audiocontext/currenttime/index.html
deleted file mode 100644
index fbdaf4315c..0000000000
--- a/files/zh-cn/web/api/audiocontext/currenttime/index.html
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: AudioContext.currentTime
-slug: Web/API/AudioContext/currentTime
-translation_of: Web/API/BaseAudioContext/currentTime
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p><code>currentTime</code> 是 {{ domxref("AudioContext") }} 的一个只读属性,返回以秒为单位的 double 值(从 0 开始),表示一个只增不减的硬件时间戳,可以用来控制音频回放、实现可视化时间轴等等。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-console.log(audioCtx.currentTime);</pre>
-
-<h3 id="返回值">返回值</h3>
-
-<p>A double.</p>
-
-<h2 id="例子">例子</h2>
-
-<div class="note">
-<p><strong>注意</strong>:想要<em>完整的</em> Web Audio 例子,可以去 <a href="https://github.com/mdn/">MDN Github repo</a> 查看 DEMO(例如 <a href="https://github.com/mdn/panner-node">panner-node</a>)。不妨试试在浏览器控制台输入 <code>audioCtx.currentTime</code>。</p>
-</div>
-
-<pre class="brush: js;highlight[8]">var AudioContext = window.AudioContext || window.webkitAudioContext;
-var audioCtx = new AudioContext();
-// Older webkit/blink browsers require a prefix
-
-...
-
-console.log(audioCtx.currentTime);
-</pre>
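-
-<p><code>currentTime</code> 常被用作调度播放的时间基准。下面是一段示意代码(振荡器仅作演示),以当前时间为基准安排开始和停止:</p>
-
-<pre class="brush: js">// 示意:1 秒后开始播放振荡器,再过 2 秒停止
-var osc = audioCtx.createOscillator();
-osc.connect(audioCtx.destination);
-
-var now = audioCtx.currentTime;
-osc.start(now + 1);
-osc.stop(now + 3);</pre>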
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-currentTime', 'currentTime')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html b/files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html
deleted file mode 100644
index 40693fd8cc..0000000000
--- a/files/zh-cn/web/api/audiocontext/decodeaudiodata/index.html
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: AudioContext.decodeAudioData()
-slug: Web/API/AudioContext/decodeAudioData
-tags:
- - API
- - Audio
- - audio接口
- - 音频解码
-translation_of: Web/API/BaseAudioContext/decodeAudioData
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}接口的<code>decodeAudioData()方法可用于异步解码</code>音频文件中的 {{domxref("ArrayBuffer")}}. <code>ArrayBuffer数据可以通过</code>{{domxref("XMLHttpRequest")}}和{{domxref("FileReader")}}来获取. AudioBuffer是通过AudioContext采样率进行解码的,然后通过回调返回结果.</p>
-</div>
-
-<p>这是从音频轨道创建用于web audio API音频源的首选方法。</p>
-
-<h2 id="语法">语法</h2>
-
-<p>旧版的回调函数语法</p>
-
-<pre class="syntaxbox">audioCtx.decodeAudioData(audioData, function(decodedData) {
- // use the decoded data here
-});</pre>
-
-<p>新版的promise-based语法:</p>
-
-<pre class="syntaxbox">audioCtx.decodeAudioData(audioData).then(function(decodedData) {
- // use the decoded data here
-});</pre>
-
-<h2 id="举例">举例</h2>
-
-<p>在本章节中,我们将首先学习基于回调的系统,然后采用新的基于promise-based的语法</p>
-
-<h3 id="旧的回调语法">旧的回调语法</h3>
-
-<p>在这个示例中,<code>getData()</code> 函数使用 XHR 加载一个音轨,并把请求的 responseType 设置为 arraybuffer,使其返回一个 ArrayBuffer 数据,然后存储在 audioData 变量中。接着我们把这个 ArrayBuffer 传给 <code>decodeAudioData()</code>;成功解码出 PCM 数据后,回调会返回解码结果,我们用 {{ domxref("AudioContext.createBufferSource()") }} 创建一个 {{ domxref("AudioBufferSourceNode") }} 并把解码结果赋给它的 buffer,再将其连接到 {{domxref("AudioContext.destination") }} 并设置为循环播放。</p>
-
-<p>通过按钮来运行 <code>getData()</code> 获取音轨并播放它;调用 <code>stop()</code> 方法后,source 会停止播放。</p>
-
-<div class="note">
-<p><strong>Note</strong>: You can <a href="http://mdn.github.io/decode-audio-data/">run the example live</a> (or <a href="https://github.com/mdn/decode-audio-data">view the source</a>.)</p>
-</div>
-
-<pre class="brush: js">// define variables
-
-var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
-var source;
-
-var pre = document.querySelector('pre');
-var myScript = document.querySelector('script');
-var play = document.querySelector('.play');
-var stop = document.querySelector('.stop');
-
-// use XHR to load an audio track, and
-// decodeAudioData to decode it and stick it in a buffer.
-// Then we put the buffer into the source
-
-function getData() {
- source = audioCtx.createBufferSource();
- var request = new XMLHttpRequest();
-
- request.open('GET', 'viper.ogg', true);
-
- request.responseType = 'arraybuffer';
-
-
- request.onload = function() {
- var audioData = request.response;
-
- audioCtx.decodeAudioData(audioData, function(buffer) {
- source.buffer = buffer;
-
- source.connect(audioCtx.destination);
- source.loop = true;
- },
-
- function(e){ console.error("Error with decoding audio data " + e.err); });
-
- }
-
- request.send();
-}
-
-// wire up buttons to stop and play audio
-
-play.onclick = function() {
-  getData();
-  source.start(0);
-  play.setAttribute('disabled', 'disabled');
-}
-
-stop.onclick = function() {
-  source.stop(0);
-  play.removeAttribute('disabled');
-}
-
-
-// dump script to pre element
-
-pre.innerHTML = myScript.innerHTML;</pre>
-
-<h3 id="新的promise-based语法">新的promise-based语法</h3>
-
-<pre class="brush: js">ctx.decodeAudioData(compressedBuffer).then(function(decodedData) {
- // use the decoded data here
-});</pre>
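-
-<p>基于 promise 的语法同样可以链式地处理解码结果和错误。下面是一段示意代码,其中 audioCtx 与 compressedBuffer 均为假设已存在的变量:</p>
-
-<pre class="brush: js">// 示意:promise 语法下的播放与错误处理
-audioCtx.decodeAudioData(compressedBuffer)
-  .then(function(decodedData) {
-    var source = audioCtx.createBufferSource();
-    source.buffer = decodedData;
-    source.connect(audioCtx.destination);
-    source.start();
-  })
-  .catch(function(err) {
-    console.error('解码音频数据时出错:', err);
-  });</pre>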
-
-<h2 id="参数">参数</h2>
-
-<dl>
- <dt>ArrayBuffer</dt>
- <dd>将会被解码的音频数据,可通过{{domxref("XMLHttpRequest")}}或{{domxref("FileReader")}}来获取.</dd>
- <dt>DecodeSuccessCallback</dt>
- <dd>当成功解码后会被调用的回调函数. 该回调函数只有一个AudioBuffer类型参数.</dd>
- <dt>DecodeErrorCallback</dt>
- <dd>一个可选的错误回调函数.</dd>
-</dl>
-
-<h2 id="返回">返回</h2>
-
-<p>一个 {{domxref("Promise") }}对象.</p>
-
-<h2 id="标准">标准</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-decodeAudioData-Promise-AudioBuffer--ArrayBuffer-audioData-DecodeSuccessCallback-successCallback-DecodeErrorCallback-errorCallback', 'decodeAudioData()')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器支持">浏览器支持</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- <tr>
- <td>Promise-based syntax</td>
- <td>{{CompatChrome(49.0)}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatNo}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Android Webview</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatChrome(33.0)}}</td>
- </tr>
- <tr>
- <td>Promise-based syntax</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatChrome(49.0)}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatVersionUnknown}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatChrome(49.0)}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="See_also">See also</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/destination/index.html b/files/zh-cn/web/api/audiocontext/destination/index.html
deleted file mode 100644
index 04fdfe8247..0000000000
--- a/files/zh-cn/web/api/audiocontext/destination/index.html
+++ /dev/null
@@ -1,114 +0,0 @@
----
-title: AudioContext.destination
-slug: Web/API/AudioContext/destination
-translation_of: Web/API/BaseAudioContext/destination
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的<code>destination属性返回一个</code>{{ domxref("AudioDestinationNode") }}表示context中所有音频(节点)的最终目标节点,一般是音频渲染设备,比如扬声器。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-gainNode.connect(audioCtx.destination);</pre>
-
-<h3 id="返回值">返回值</h3>
-
-<p>An {{ domxref("AudioDestinationNode") }}.</p>
-
-<h2 id="例子">例子</h2>
-
-<div class="note">
-<p><strong>Note</strong>: For a complete example, see one of the demos in the <a href="https://github.com/mdn/">MDN GitHub repo</a>, such as <a href="https://github.com/mdn/panner-node">panner-node</a>.</p>
-</div>
-
-<pre class="brush: js;highlight[8]">var AudioContext = window.AudioContext || window.webkitAudioContext;
-var audioCtx = new AudioContext();
-// Older webkit/blink browsers require a prefix
-
-var oscillatorNode = audioCtx.createOscillator();
-var gainNode = audioCtx.createGain();
-
-oscillatorNode.connect(gainNode);
-gainNode.connect(audioCtx.destination);
-</pre>
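-
-<p>As a minimal follow-up sketch (not part of the original snippet), starting the oscillator makes sound actually flow through the gain node to the destination; the waveform, frequency, and gain values here are only illustrative:</p>
-
-<pre class="brush: js">oscillatorNode.type = 'sine';          // basic waveform
-oscillatorNode.frequency.value = 440;  // illustrative pitch, in Hz
-gainNode.gain.value = 0.5;             // halve the output level
-
-oscillatorNode.start();                // audio now reaches audioCtx.destination
-</pre>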
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-destination', 'destination')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/listener/index.html b/files/zh-cn/web/api/audiocontext/listener/index.html
deleted file mode 100644
index 81b2a730a2..0000000000
--- a/files/zh-cn/web/api/audiocontext/listener/index.html
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: AudioContext.listener
-slug: Web/API/AudioContext/listener
-translation_of: Web/API/BaseAudioContext/listener
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的<code>listener属性</code>返回一个{{ domxref("AudioListener") }} 对象,可以用来实现3D音频空间化。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var myListener = audioCtx.listener;</pre>
-
-<h3 id="返回值">返回值</h3>
-
-<p>An {{ domxref("AudioListener") }} object.</p>
-
-<h2 id="例子">例子</h2>
-
-<div class="note">
-<p><strong>Note</strong>: For a complete audio spatialization example, see the <a href="https://github.com/mdn/panner-node">panner-node</a> demo.</p>
-</div>
-
-<pre class="brush: js;highlight[8]">var AudioContext = window.AudioContext || window.webkitAudioContext;
-var audioCtx = new AudioContext();
-// Older webkit/blink browsers require a prefix
-
-...
-
-var myListener = audioCtx.listener;
-</pre>
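-
-<p>A rough sketch of how the listener might then be positioned and oriented (the coordinate values below are purely illustrative, not taken from the demo):</p>
-
-<pre class="brush: js">// Place the listener at the origin of the 3D space...
-myListener.setPosition(0, 0, 0);
-
-// ...facing down the negative Z axis, with +Y as "up"
-myListener.setOrientation(0, 0, -1, 0, 1, 0);
-</pre>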
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-listener', 'listener')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html b/files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html
deleted file mode 100644
index 2b7022c1ce..0000000000
--- a/files/zh-cn/web/api/audiocontext/mozaudiochanneltype/index.html
+++ /dev/null
@@ -1,95 +0,0 @@
----
-title: AudioContext.mozAudioChannelType
-slug: Web/API/AudioContext/mozAudioChannelType
-translation_of: Web/API/AudioContext/mozAudioChannelType
----
-<p>{{APIRef("Web Audio API")}} {{Non-standard_header}}</p>
-
-<p>{{domxref("AudioContext")}}的<code>mozAudioChannelType</code>属性是只读的,在Firefox OS设备上可以用来设置音频在audio context中播放的声道。</p>
-
-<p>This is a non-standard property defined in the <a href="/en-US/docs/Web/API/AudioChannels_API">AudioChannels API</a>; see <a href="https://developer.mozilla.org/en-US/docs/Web/API/AudioChannels_API/Using_the_AudioChannels_API">Using the AudioChannels API</a> for more information.</p>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var myAudioChannelType = audioCtx.mozAudioChannelType;
-</pre>
-
-<p>The audio channel of an <code>AudioContext</code> can only be set at construction time, using the constructor form shown below:</p>
-
-<pre class="brush: js">var audioCtx = new AudioContext('ringer');</pre>
-
-<h3 id="返回值">返回值</h3>
-
-<p>A {{domxref("DOMString")}} value.</p>
-
-<h2 id="例子">例子</h2>
-
-<p>TBD</p>
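-
-<p>Pending a full example, a minimal sketch based on the syntax above might look like the following (non-standard, Firefox OS only; the <code>'ringer'</code> channel is just an illustration and requires the corresponding app permission):</p>
-
-<pre class="brush: js">// Minimal sketch: request the "ringer" channel at construction time,
-// then read back the channel the context was actually given.
-var audioCtx = new AudioContext('ringer');
-console.log(audioCtx.mozAudioChannelType); // expected to log "ringer" if permitted
-</pre>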
-
-<h2 id="规范">规范</h2>
-
-<p>The AudioChannels API currently has no official specification; see <a href="https://wiki.mozilla.org/WebAPI/AudioChannels">https://wiki.mozilla.org/WebAPI/AudioChannels</a> for implementation details, WebIDL, and so on.</p>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>General support</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Chrome</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Phone</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- </tr>
- <tr>
- <td>General support</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- <td>1.2</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- <td>{{CompatNo}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/Apps/Build/App_permissions">App permissions for Firefox OS</a></li>
- <li><a href="/en-US/docs/Web/API/AudioChannels_API/Using_the_AudioChannels_API">Using the AudioChannels API</a></li>
- <li>{{domxref("Navigator.mozAudioChannelManager","navigator.mozAudioChannelManager")}}</li>
- <li>{{domxref("AudioContext")}}</li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/onstatechange/index.html b/files/zh-cn/web/api/audiocontext/onstatechange/index.html
deleted file mode 100644
index ee9b3f21c0..0000000000
--- a/files/zh-cn/web/api/audiocontext/onstatechange/index.html
+++ /dev/null
@@ -1,101 +0,0 @@
----
-title: AudioContext.onstatechange
-slug: Web/API/AudioContext/onstatechange
-translation_of: Web/API/BaseAudioContext/onstatechange
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的<code>onstatechange属性定义了一个事件处理器函数,触发</code>{{Event("statechange")}}会被调用,也就是说audio context的状态发生变化时会执行。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-audioCtx.onstatechange = function() { ... };</pre>
-
-<h2 id="例子">例子</h2>
-
-<p>The following snippet is taken from the <a href="https://github.com/mdn/audiocontext-states/settings">AudioContext states demo</a> (<a href="http://mdn.github.io/audiocontext-states/">see it running live</a>). The <code>onstatechange</code> handler logs the current {{domxref("state")}} to the console every time it changes.</p>
-
-<pre class="brush: js">audioCtx.onstatechange = function() {
-  console.log(audioCtx.state);
-}
-</pre>
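-
-<p>As a rough sketch (not taken from the demo), suspending and then resuming the context is one way to see the handler fire; both methods return promises:</p>
-
-<pre class="brush: js">audioCtx.suspend().then(function() {
-  // onstatechange has logged "suspended"
-  return audioCtx.resume();
-}).then(function() {
-  // onstatechange has logged "running"
-});
-</pre>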
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-onstatechange', 'onstatechange')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(43.0)}}</td>
- <td>{{CompatGeckoDesktop(40.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/samplerate/index.html b/files/zh-cn/web/api/audiocontext/samplerate/index.html
deleted file mode 100644
index b811702e26..0000000000
--- a/files/zh-cn/web/api/audiocontext/samplerate/index.html
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: AudioContext.sampleRate
-slug: Web/API/AudioContext/sampleRate
-translation_of: Web/API/BaseAudioContext/sampleRate
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的<code>sampleRate属性</code>返回一个浮点数表示采样率(每秒采样数), 同一个AudioContext中的所有节点采样率相同,所以不支持采样率转换。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var mySampleRate = audioCtx.sampleRate;</pre>
-
-<h3 id="返回值">返回值</h3>
-
-<p>A floating point number.</p>
-
-<h2 id="例子">例子</h2>
-
-<div class="note">
-<p><strong>Note:</strong> For a complete Web Audio example, see one of the Web Audio demos in the <a href="https://github.com/mdn/">MDN GitHub repo</a>, such as <a href="https://github.com/mdn/panner-node">panner-node</a>. Try entering <code>audioCtx.sampleRate</code> in your browser console.</p>
-</div>
-
-<pre class="brush: js;highlight[8]">var AudioContext = window.AudioContext || window.webkitAudioContext;
-var audioCtx = new AudioContext();
-// Older webkit/blink browsers require a prefix
-
-...
-
-console.log(audioCtx.sampleRate);
-</pre>
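-
-<p>As a small sketch of why the value is useful (not part of the original snippet), the sample rate converts between seconds and sample frames, for example when allocating a buffer:</p>
-
-<pre class="brush: js">// Allocate a 2-second stereo buffer at the context's own sample rate
-var seconds = 2;
-var frameCount = audioCtx.sampleRate * seconds;
-var myBuffer = audioCtx.createBuffer(2, frameCount, audioCtx.sampleRate);
-</pre>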
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-sampleRate', 'sampleRate')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(10.0)}}{{property_prefix("webkit")}}</td>
- <td>{{CompatGeckoDesktop(25.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>15.0{{property_prefix("webkit")}}<br>
- 22 (unprefixed)</td>
- <td>6.0{{property_prefix("webkit")}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>26.0</td>
- <td>1.2</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>33.0</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>
diff --git a/files/zh-cn/web/api/audiocontext/state/index.html b/files/zh-cn/web/api/audiocontext/state/index.html
deleted file mode 100644
index 97876f5d3d..0000000000
--- a/files/zh-cn/web/api/audiocontext/state/index.html
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: AudioContext.state
-slug: Web/API/AudioContext/state
-translation_of: Web/API/BaseAudioContext/state
----
-<p>{{ APIRef("Web Audio API") }}</p>
-
-<div>
-<p>{{ domxref("AudioContext") }}的<code>state属性是只读的,</code>返回<code>AudioContext</code>的当前状态。</p>
-</div>
-
-<h2 id="语法">语法</h2>
-
-<pre class="brush: js">var audioCtx = new AudioContext();
-var myState = audioCtx.state;</pre>
-
-<h3 id="返回值">返回值</h3>
-
-<p>{{domxref("DOMString")}},可能的值如下:</p>
-
-<ul>
- <li><code>suspended</code>: audio context被阻塞了(用{{domxref("AudioContext.suspend()")}} 方法)</li>
- <li><code>running</code>: audio context正常运行</li>
- <li><code>closed</code>: audio context被关闭了(用{{domxref("AudioContext.close()")}}方法)</li>
-</ul>
-
-<h2 id="例子">例子</h2>
-
-<p>The following snippet is taken from the <a href="https://github.com/mdn/audiocontext-states/settings">AudioContext states demo</a> (run it live). The {{domxref("AudioContext.onstatechange")}} handler logs the current state to the console every time it changes.</p>
-
-<pre class="brush: js">audioCtx.onstatechange = function() {
-  console.log(audioCtx.state);
-}
-</pre>
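-
-<p>A minimal sketch (not taken from the demo) of how the value might be checked before playback, resuming a suspended context first:</p>
-
-<pre class="brush: js">function ensureRunning() {
-  if (audioCtx.state === 'suspended') {
-    // resume() returns a promise that resolves once the context is running again
-    return audioCtx.resume();
-  }
-  return Promise.resolve();
-}
-</pre>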
-
-<h2 id="规范">规范</h2>
-
-<table class="standard-table">
- <tbody>
- <tr>
- <th scope="col">Specification</th>
- <th scope="col">Status</th>
- <th scope="col">Comment</th>
- </tr>
- <tr>
- <td>{{SpecName('Web Audio API', '#widl-AudioContext-state', 'state')}}</td>
- <td>{{Spec2('Web Audio API')}}</td>
- <td> </td>
- </tr>
- </tbody>
-</table>
-
-<h2 id="浏览器兼容性">浏览器兼容性</h2>
-
-<div>{{CompatibilityTable}}</div>
-
-<div id="compat-desktop">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Chrome</th>
- <th>Firefox (Gecko)</th>
- <th>Internet Explorer</th>
- <th>Opera</th>
- <th>Safari (WebKit)</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatChrome(43.0)}}</td>
- <td>{{CompatGeckoDesktop(40.0)}} </td>
- <td>{{CompatNo}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<div id="compat-mobile">
-<table class="compat-table">
- <tbody>
- <tr>
- <th>Feature</th>
- <th>Android</th>
- <th>Firefox Mobile (Gecko)</th>
- <th>Firefox OS</th>
- <th>IE Mobile</th>
- <th>Opera Mobile</th>
- <th>Safari Mobile</th>
- <th>Chrome for Android</th>
- </tr>
- <tr>
- <td>Basic support</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- <td>{{CompatUnknown}}</td>
- </tr>
- </tbody>
-</table>
-</div>
-
-<h2 id="另见">另见</h2>
-
-<ul>
- <li><a href="/en-US/docs/Web_Audio_API/Using_Web_Audio_API">Using the Web Audio API</a></li>
-</ul>