I am currently looking to start a career using Java and decided to begin by building apps. Here is the code I am using to trigger speech recognition.
public class MainActivity extends Activity implements OnClickListener {

    private static final int VR_REQUEST = 999;
    private ListView wordList;
    private final String LOG_TAG = "SpeechRepeatActivity";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        Button speechBtn = (Button) findViewById(R.id.speech_btn);
        wordList = (ListView) findViewById(R.id.word_list);

        // only enable the button if a speech recognition activity is available
        PackageManager packManager = getPackageManager();
        List<ResolveInfo> intActivities = packManager.queryIntentActivities(
                new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
        if (intActivities.size() != 0) {
            speechBtn.setOnClickListener(this);
        } else {
            speechBtn.setEnabled(false);
            Toast.makeText(this, "Oops - Speech Recognition Not Supported!",
                    Toast.LENGTH_LONG).show();
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    public void onClick(View v) {
        if (v.getId() == R.id.speech_btn) {
            listenToSpeech();
        }
    }

    private void listenToSpeech() {
        // start the speech recognition intent, passing the required data
        Intent listenIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        // indicate the calling package
        listenIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                getClass().getPackage().getName());
        // message to display while listening
        listenIntent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Say a word!");
        // set the speech model
        listenIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        // specify the number of results to retrieve
        listenIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 10);
        // start listening
        startActivityForResult(listenIntent, VR_REQUEST);
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        // check the speech recognition result
        if (requestCode == VR_REQUEST && resultCode == RESULT_OK) {
            // store the returned word list as an ArrayList
            ArrayList<String> suggestedWords =
                    data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
            // display the retrieved list in the ListView using an ArrayAdapter
            wordList.setAdapter(new ArrayAdapter<String>(this, R.layout.word, suggestedWords));
        }
        // detect which suggestion the user taps
        wordList.setOnItemClickListener(new OnItemClickListener() {
            // click listener for items within the list
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                // cast the tapped row view to a TextView
                TextView wordView = (TextView) view;
                // retrieve the chosen word
                String wordChosen = (String) wordView.getText();
                // output for debugging
                Log.v(LOG_TAG, "chosen: " + wordChosen);
            }
        });
        super.onActivityResult(requestCode, resultCode, data);
    }
}
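For context, the only layout resources this code relies on are a button and a ListView in activity_main, plus a single-TextView row layout (R.layout.word) for the adapter. A minimal sketch of what those files might look like follows; everything except the IDs R.id.speech_btn and R.id.word_list and the word layout name is my own assumption, not part of the original project.

<!-- res/layout/activity_main.xml (sketch; attribute values are assumed) -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical">

    <Button
        android:id="@+id/speech_btn"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="Speak" />

    <ListView
        android:id="@+id/word_list"
        android:layout_width="match_parent"
        android:layout_height="match_parent" />
</LinearLayout>

<!-- res/layout/word.xml (sketch) - one TextView per recognition result -->
<TextView xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="wrap_content"
    android:padding="8dp"
    android:textSize="18sp" />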
In this app, the user presses a button and is taken to the Google voice input screen, where you can tap a button (it actually starts on its own), speak, and it stops and shows what you said. I don't want that window to pop up at all. Instead, I want the user to press the button, speak, and have the app stop listening and display the text automatically (which it already does).
Please! I know there are already answers on this forum showing how to do this; in fact, a user named JEEZ posted some code here.
I just don't know where to put it in my project files. I'm a beginner! If someone could help me clarify this, I would really appreciate it.
Here is my code:
package com.example.speechrecognizertest;

import java.util.ArrayList;
import java.util.List;

import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.os.Bundle;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;
import android.view.Menu;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.TextView;
import android.widget.Toast;

public class MainActivity extends Activity {

    private static final int VR_REQUEST = 999;
    public static final String TAG = null;
    private ListView wordList;
    private final String LOG_TAG = "SpeechRepeatActivity";
    private SpeechRecognizer mSpeechRecognizer;
    private Intent mSpeechRecognizerIntent;
    private boolean mIslistening;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        Button speechBtn = (Button) findViewById(R.id.speech_btn);
        wordList = (ListView) findViewById(R.id.word_list);

        PackageManager packManager = getPackageManager();
        List<ResolveInfo> intActivities = packManager.queryIntentActivities(
                new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);

        mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
        mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                this.getPackageName());

        if (!mIslistening) {
            mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
        } else {
            speechBtn.setEnabled(false);
            Toast.makeText(this, "Oops - Speech Recognition Not Supported!",
                    Toast.LENGTH_LONG).show();
        }
    }

    @Override
    protected void onDestroy() {
        // TODO Auto-generated method stub
        super.onDestroy();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    protected class SpeechRecognitionListener implements RecognitionListener {

        @Override
        public void onBeginningOfSpeech() {
            //Log.d(TAG, "onBeginningOfSpeech");
        }

        @Override
        public void onBufferReceived(byte[] buffer) {
        }

        @Override
        public void onEndOfSpeech() {
            //Log.d(TAG, "onEndOfSpeech");
        }

        @Override
        public void onError(int error) {
            mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
            //Log.d(TAG, "error = " + error);
        }

        @Override
        public void onEvent(int eventType, Bundle params) {
        }

        @Override
        public void onPartialResults(Bundle partialResults) {
        }

        @Override
        public void onReadyForSpeech(Bundle params) {
            Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
        }

        @Override
        public void onResults(Bundle results) {
            //Log.d(TAG, "onResults"); //$NON-NLS-1$
            ArrayList<String> suggestedWords =
                    results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            // matches are the return values of the speech recognition engine;
            // the adapter needs the enclosing activity (not this listener) as its Context
            wordList.setAdapter(new ArrayAdapter<String>(
                    MainActivity.this, R.layout.word, suggestedWords));
        }

        @Override
        public void onRmsChanged(float rmsdB) {
        }
    }
}
AndroidManifest.xml
Add the following permission:
<uses-permission android:name="android.permission.RECORD_AUDIO" />
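Note that on Android 6.0 (API 23) and later, RECORD_AUDIO is a dangerous permission, so the manifest entry alone is not enough; it also has to be granted at runtime. A minimal sketch of such a check inside the activity (the request code and method name are my own choices, not part of the original answer):

private static final int REQUEST_RECORD_AUDIO = 1; // arbitrary request code

private void ensureAudioPermission() {
    // needs: import android.Manifest; import android.os.Build; import android.content.pm.PackageManager;
    if (Build.VERSION.SDK_INT >= 23
            && checkSelfPermission(Manifest.permission.RECORD_AUDIO)
                    != PackageManager.PERMISSION_GRANTED) {
        // ask the user; the result arrives in onRequestPermissionsResult()
        requestPermissions(new String[] { Manifest.permission.RECORD_AUDIO },
                REQUEST_RECORD_AUDIO);
    }
}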
Class members
private SpeechRecognizer mSpeechRecognizer;
private Intent mSpeechRecognizerIntent;
private boolean mIsListening;
In onCreate:
@Override
protected void onCreate(Bundle savedInstanceState)
{
    super.onCreate(savedInstanceState);
    .........
    .........
    mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
    mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
            this.getPackageName());
    SpeechRecognitionListener listener = new SpeechRecognitionListener();
    mSpeechRecognizer.setRecognitionListener(listener);
}
In your button listener, just use this code:
if (!mIsListening)
{
    mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
}
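The snippets above never show where mIsListening is set, so here is one possible way to wire the flag and the button together; toggling the flag in the callbacks is my assumption, not part of the original answer (speechBtn is the button from the question's onCreate):

speechBtn.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        if (!mIsListening) {
            mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
        }
    }
});

// and keep the flag in sync inside SpeechRecognitionListener (assumption):
//   onReadyForSpeech()  -> mIsListening = true;
//   onError(int error)  -> mIsListening = false;
//   onResults(Bundle r) -> mIsListening = false;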
In onDestroy:
if (mSpeechRecognizer != null)
{
    mSpeechRecognizer.destroy();
}
Create an inner class in your activity:
protected class SpeechRecognitionListener implements RecognitionListener
{
    @Override
    public void onBeginningOfSpeech()
    {
        //Log.d(TAG, "onBeginningOfSpeech");
    }

    @Override
    public void onBufferReceived(byte[] buffer)
    {
    }

    @Override
    public void onEndOfSpeech()
    {
        //Log.d(TAG, "onEndOfSpeech");
    }

    @Override
    public void onError(int error)
    {
        mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
        //Log.d(TAG, "error = " + error);
    }

    @Override
    public void onEvent(int eventType, Bundle params)
    {
    }

    @Override
    public void onPartialResults(Bundle partialResults)
    {
    }

    @Override
    public void onReadyForSpeech(Bundle params)
    {
        Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
    }

    @Override
    public void onResults(Bundle results)
    {
        //Log.d(TAG, "onResults"); //$NON-NLS-1$
        ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        // matches are the return values of the speech recognition engine
        // use these values for whatever you wish to do
    }

    @Override
    public void onRmsChanged(float rmsdB)
    {
    }
}
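As a usage example, if you want the results to fill the ListView the way the question's first version did, onResults could look like the sketch below. Note that inside the inner class the adapter needs the activity as its Context (MainActivity.this), not this; wordList and R.layout.word come from the question's code, and clearing mIsListening here is my assumption:

@Override
public void onResults(Bundle results)
{
    ArrayList<String> matches =
            results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    if (matches != null) {
        // 'this' here would be the listener, so pass the enclosing activity as Context
        wordList.setAdapter(new ArrayAdapter<String>(
                MainActivity.this, R.layout.word, matches));
    }
    mIsListening = false; // assumption: allow the button to start listening again
}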
EDIT
2015-02-07: Merged the code from ZakiMak's and Born To Win's answers to this question into this answer, to make it more complete.