diff --git a/files/zh-cn/web/api/speechrecognition/index.html b/files/zh-cn/web/api/speechrecognition/index.html
new file mode 100644
index 0000000000..2f8bca2c84
--- /dev/null
+++ b/files/zh-cn/web/api/speechrecognition/index.html
@@ -0,0 +1,154 @@
+---
+title: 语音识别
+slug: Web/API/SpeechRecognition
+translation_of: Web/API/SpeechRecognition
+original_slug: Web/API/语音识别
+---
+<p>{{APIRef("Web Speech API")}}{{SeeCompatTable}}</p>
+
+<p>The <strong><code>SpeechRecognition</code></strong> interface of the <a href="/en-US/docs/Web/API/Web_Speech_API">Web Speech API</a> is the controller interface for the recognition service; it also handles the {{domxref("SpeechRecognitionEvent")}} sent from the recognition service.</p>
+
+<div class="note">
+<p><strong>Note</strong>: On Chrome, using Speech Recognition on a web page involves a server-based recognition engine. Your audio is sent to a web service for recognition processing, so it won't work offline.</p>
+</div>
+
+<h2 id="Constructor">Constructor</h2>
+
+<dl>
+ <dt>{{domxref("SpeechRecognition.SpeechRecognition()")}}</dt>
+ <dd>Creates a new <code>SpeechRecognition</code> object.</dd>
+</dl>
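+
+<p>A minimal sketch of creating a recognizer; in Chromium-based browsers the constructor is currently exposed with a <code>webkit</code> prefix, so a fallback is assumed here:</p>
+
+<pre class="brush: js">// Use the prefixed constructor where the unprefixed one is unavailable
+var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+var recognition = new SpeechRecognition();</pre>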
+
+<h2 id="Properties">Properties</h2>
+
+<p><em><code>SpeechRecognition</code> also inherits properties from its parent interface, {{domxref("EventTarget")}}.</em></p>
+
+<dl>
+ <dt>{{domxref("SpeechRecognition.grammars")}}</dt>
+ <dd>Returns and sets a collection of {{domxref("SpeechGrammar")}} objects that represent the grammars that will be understood by the current <code>SpeechRecognition</code>.</dd>
+ <dt>{{domxref("SpeechRecognition.lang")}}</dt>
+ <dd>Returns and sets the language of the current <code>SpeechRecognition</code>. If not specified, this defaults to the HTML {{htmlattrxref("lang","html")}} attribute value, or the user agent's language setting if that isn't set either.</dd>
+ <dt>{{domxref("SpeechRecognition.continuous")}}</dt>
+ <dd>Controls whether continuous results are returned for each recognition, or only a single result. Defaults to a single result (<code>false</code>).</dd>
+ <dt>{{domxref("SpeechRecognition.interimResults")}}</dt>
+ <dd>Controls whether interim results should be returned (<code>true</code>) or not (<code>false</code>). Interim results are results that are not yet final (i.e. the {{domxref("SpeechRecognitionResult.isFinal")}} property is <code>false</code>).</dd>
+ <dt>{{domxref("SpeechRecognition.maxAlternatives")}}</dt>
+ <dd>Sets the maximum number of {{domxref("SpeechRecognitionAlternative")}}s provided per result. The default value is 1.</dd>
+ <dt>{{domxref("SpeechRecognition.serviceURI")}}</dt>
+ <dd>Specifies the location of the speech recognition service used by the current <code>SpeechRecognition</code> to handle the actual recognition. The default is the user agent's default speech service.</dd>
+</dl>
+
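+<p>A short sketch of configuring these properties before starting recognition; the values shown are illustrative, not defaults:</p>
+
+<pre class="brush: js">var recognition = new SpeechRecognition();
+recognition.lang = 'en-US';          // recognise US English
+recognition.continuous = true;       // keep returning results until stop() is called
+recognition.interimResults = true;   // also deliver results that are not yet final
+recognition.maxAlternatives = 3;     // up to three alternatives per result</pre>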
+
+<h2 id="Methods">Methods</h2>
+
+<p><em><code>SpeechRecognition</code> also inherits methods from its parent interface, {{domxref("EventTarget")}}.</em></p>
+
+<dl>
+ <dt>{{domxref("SpeechRecognition.abort()")}}</dt>
+ <dd>Stops the speech recognition service from listening to incoming audio, and doesn't attempt to return a {{domxref("SpeechRecognitionResult")}}.</dd>
+ <dt>{{domxref("SpeechRecognition.start()")}}</dt>
+ <dd>Starts the speech recognition service listening to incoming audio with intent to recognize grammars associated with the current <code>SpeechRecognition</code>.</dd>
+ <dt>{{domxref("SpeechRecognition.stop()")}}</dt>
+ <dd>Stops the speech recognition service from listening to incoming audio, and attempts to return a {{domxref("SpeechRecognitionResult")}} using the audio captured so far.</dd>
+</dl>
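+
+<p>A brief sketch of driving the service with these methods; the start and stop buttons are assumed markup, used here only for illustration:</p>
+
+<pre class="brush: js">var recognition = new SpeechRecognition();
+var startButton = document.querySelector('#start'); // assumed &lt;button id="start"&gt;
+var stopButton = document.querySelector('#stop');   // assumed &lt;button id="stop"&gt;
+
+// Begin listening and try to recognise the associated grammars
+startButton.onclick = function() {
+  recognition.start();
+};
+
+// Stop listening and return a result from the audio captured so far;
+// use recognition.abort() instead to discard any pending result
+stopButton.onclick = function() {
+  recognition.stop();
+};</pre>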
+
+<h2 id="Events">Events</h2>
+
+<p>Listen to these events using <code><a href="/en-US/docs/Web/API/EventTarget/addEventListener">addEventListener()</a></code> or by assigning an event listener to the <code>on<em>eventname</em></code> property of this interface.</p>
+
+<dl>
+ <dt><a href="/en-US/docs/Web/API/SpeechRecognition/audiostart_event"><code>audiostart</code></a></dt>
+ <dd>Fired when the user agent has started to capture audio.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onaudiostart">onaudiostart</a></code> property.</dd>
+ <dt><a href="/en-US/docs/Web/API/SpeechRecognition/audioend_event"><code>audioend</code></a></dt>
+ <dd>Fired when the user agent has finished capturing audio.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onaudioend">onaudioend</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/end_event">end</a></code></dt>
+ <dd>Fired when the speech recognition service has disconnected.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onend">onend</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/error_event">error</a></code></dt>
+ <dd>Fired when a speech recognition error occurs.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onerror">onerror</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/nomatch_event">nomatch</a></code></dt>
+ <dd>Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the {{domxref("SpeechRecognitionAlternative.confidence","confidence")}} threshold.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onnomatch">onnomatch</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/result_event">result</a></code></dt>
+ <dd>Fired when the speech recognition service returns a result — a word or phrase has been positively recognized and this has been communicated back to the app.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onresult">onresult</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/soundstart_event">soundstart</a></code></dt>
+ <dd>Fired when any sound — recognisable speech or not — has been detected.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onsoundstart">onsoundstart</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/soundend_event">soundend</a></code></dt>
+ <dd>Fired when any sound — recognisable speech or not — has stopped being detected.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onsoundend">onsoundend</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/speechstart_event">speechstart</a></code></dt>
+ <dd>Fired when sound that is recognised by the speech recognition service as speech has been detected.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onspeechstart">onspeechstart</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/speechend_event">speechend</a></code></dt>
+ <dd>Fired when speech recognised by the speech recognition service has stopped being detected.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onspeechend">onspeechend</a></code> property.</dd>
+ <dt><code><a href="/en-US/docs/Web/API/SpeechRecognition/start_event">start</a></code></dt>
+ <dd>Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current <code>SpeechRecognition</code>.<br>
+ Also available via the <code><a href="/en-US/docs/Web/API/SpeechRecognition/onstart">onstart</a></code> property.</dd>
+</dl>
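+
+<p>Either form works; a minimal sketch using <code>addEventListener()</code> for the <code>result</code> and <code>end</code> events:</p>
+
+<pre class="brush: js">var recognition = new SpeechRecognition();
+
+recognition.addEventListener('result', function(event) {
+  // Log the transcript of the first alternative of the first result
+  console.log('Heard: ' + event.results[0][0].transcript);
+});
+
+recognition.addEventListener('end', function() {
+  console.log('The recognition service has disconnected.');
+});
+
+recognition.start();</pre>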
+
+<h2 id="Examples">Examples</h2>
+
+<p>In our simple <a href="https://github.com/mdn/web-speech-api/tree/master/speech-color-changer">Speech color changer</a> example, we create a new <code>SpeechRecognition</code> object instance using the {{domxref("SpeechRecognition.SpeechRecognition", "SpeechRecognition()")}} constructor, create a new {{domxref("SpeechGrammarList")}}, and set it to be the grammar that will be recognised by the <code>SpeechRecognition</code> instance using the {{domxref("SpeechRecognition.grammars")}} property.</p>
+
+<p>After some other values have been defined, we set the recognition service to start when a click event occurs (see {{domxref("SpeechRecognition.start()")}}). When a result has been successfully recognised, the {{domxref("SpeechRecognition.onresult")}} handler fires; we extract the color that was spoken from the event object, then set the background color of the {{htmlelement("html")}} element to that color.</p>
+
+<pre class="brush: js">var grammar = '#JSGF V1.0; grammar colors; public &lt;color&gt; = aqua | azure | beige | bisque | black | blue | brown | chocolate | coral | crimson | cyan | fuchsia | ghostwhite | gold | goldenrod | gray | green | indigo | ivory | khaki | lavender | lime | linen | magenta | maroon | moccasin | navy | olive | orange | orchid | peru | pink | plum | purple | red | salmon | sienna | silver | snow | tan | teal | thistle | tomato | turquoise | violet | white | yellow ;'
+var recognition = new SpeechRecognition();
+var speechRecognitionList = new SpeechGrammarList();
+speechRecognitionList.addFromString(grammar, 1);
+recognition.grammars = speechRecognitionList;
+//recognition.continuous = false;
+recognition.lang = 'en-US';
+recognition.interimResults = false;
+recognition.maxAlternatives = 1;
+
+var diagnostic = document.querySelector('.output');
+var bg = document.querySelector('html');
+
+document.body.onclick = function() {
+ recognition.start();
+ console.log('Ready to receive a color command.');
+}
+
+recognition.onresult = function(event) {
+ var color = event.results[0][0].transcript;
+ diagnostic.textContent = 'Result received: ' + color;
+ bg.style.backgroundColor = color;
+}</pre>
+
+<h2 id="Specifications">Specifications</h2>
+
+<table class="standard-table">
+ <tbody>
+ <tr>
+ <th scope="col">Specification</th>
+ <th scope="col">Status</th>
+ <th scope="col">Comment</th>
+ </tr>
+ <tr>
+ <td>{{SpecName('Web Speech API', '#speechreco-section', 'SpeechRecognition')}}</td>
+ <td>{{Spec2('Web Speech API')}}</td>
+ <td> </td>
+ </tr>
+ </tbody>
+</table>
+
+<h2 id="Browser_compatibility">Browser compatibility</h2>
+
+
+
+<p>{{Compat("api.SpeechRecognition")}}</p>
+
+<h2 id="See_also">See also</h2>
+
+<ul>
+ <li><a href="/en-US/docs/Web/API/Web_Speech_API">Web Speech API</a></li>
+</ul>