I'm developing an application that needs TTS. I've implemented a RecognitionListener, and it works fine the first time it runs. After that it still listens and returns results, but onRmsChanged is no longer called.
Here is the method where I start speech recognition:
private void promptSpeechInput(String langCode) {
Intent speechIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
speechIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
mContext.getPackageName());
speechIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE,
langCode);
speechIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mIsSpeechRecognitionOn = true;
mSpeechRecognizer.startListening(speechIntent);
}
And here is the class that implements RecognitionListener:
public class ConversationFragment extends Fragment implements RecognitionListener {
private final String LOG_TAG = ConversationFragment.class.getSimpleName();
private static final int PERMISSION_REQUEST_RECORD_AUDIO = 1;
private Translator mTranslator;
private SpeechRecognizer mSpeechRecognizer;
private TextToSpeech mTextToSpeech;
private View mRootView;
private TranslationPanel mTranslationPanel;
private RecyclerView mConversationView;
private TextView mEmptyConversationView;
private ChatAdapter mConversationAdapter;
private Set<Locale> mLocales;
public ConversationFragment() {
}
@Override
public View onCreateView(LayoutInflater inflater, final ViewGroup container,
Bundle savedInstanceState) {
mRootView = inflater.inflate(R.layout.fragment_conversation, container, false);
mTranslator = new Translator(getContext());
setupSpeechRecognizer();
mTranslationPanel = new TranslationPanel(getContext(), mRootView, mSpeechRecognizer);
mTextToSpeech = new TextToSpeech(getContext(), new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
mLocales = mTextToSpeech.getAvailableLanguages();
}
mConversationAdapter.setTextToSpeech(mTextToSpeech);
}
});
mEmptyConversationView = (TextView) mRootView.findViewById(R.id.empty_conversation_textview);
mConversationView = (RecyclerView) mRootView.findViewById(R.id.conversation_container);
RecyclerView.LayoutManager layoutManager = new LinearLayoutManager(getContext());
((LinearLayoutManager) layoutManager).setStackFromEnd(true);
mConversationView.setLayoutManager(layoutManager);
List<Translation> translations = Translation.listAll(Translation.class);
mConversationAdapter = new ChatAdapter(getContext(), translations);
mConversationView.setAdapter(mConversationAdapter);
mConversationView.scrollToPosition(mConversationAdapter.getItemCount() - 1);
if (translations == null || translations.size() == 0) {
mConversationView.setVisibility(View.GONE);
mEmptyConversationView.setVisibility(View.VISIBLE);
}
checkMicrophonePermission();
return mRootView;
}
public void checkMicrophonePermission() {
int microphonePermission =
ContextCompat.checkSelfPermission(getContext(), android.Manifest.permission.RECORD_AUDIO);
if (microphonePermission != PackageManager.PERMISSION_GRANTED) {
ActivityCompat.requestPermissions(getActivity(),
new String[]{android.Manifest.permission.RECORD_AUDIO},
PERMISSION_REQUEST_RECORD_AUDIO);
}
}
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
@NonNull int[] grantResults) {
switch (requestCode) {
case PERMISSION_REQUEST_RECORD_AUDIO:
if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
mTranslationPanel.getLeftTranslator().setEnabled(true);
mTranslationPanel.getRightTranslator().setEnabled(true);
} else {
mTranslationPanel.getLeftTranslator().setEnabled(false);
mTranslationPanel.getRightTranslator().setEnabled(false);
}
}
}
private void setupSpeechRecognizer() {
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(getContext());
mSpeechRecognizer.setRecognitionListener(this);
}
@Override
public void onDestroy() {
super.onDestroy();
if (mTextToSpeech != null) {
mTextToSpeech.shutdown();
}
if (mSpeechRecognizer != null) {
mSpeechRecognizer.destroy();
}
}
private void translate(final String text) {
String leftLanguageCode = Utility.getCodeFromLanguage(getContext(),
Utility.getTranslatorLanguage(getContext(), Utility.LEFT_TRANSLATOR_LANGUAGE));
String rightLanguageCode = Utility.getCodeFromLanguage(getContext(),
Utility.getTranslatorLanguage(getContext(), Utility.RIGHT_TRANSLATOR_LANGUAGE));
Callback<TranslationResult> callback = new Callback<TranslationResult>() {
@Override
public void onResponse(Call<TranslationResult> call, Response<TranslationResult> response) {
final TranslationResult translation = response.body();
final String lang = translation.getLang();
String translatedText = translation.getText()[0];
Translation chatTranslation = mTranslationPanel.hasJustUsedLeftTranslator() ?
new Translation(translatedText, text, true, lang) :
new Translation(translatedText, text, false, lang);
if (mConversationView.getVisibility() == View.GONE) {
mConversationView.setVisibility(View.VISIBLE);
mEmptyConversationView.setVisibility(View.GONE);
}
mConversationAdapter.addTranslation(chatTranslation);
chatTranslation.save();
if (mConversationAdapter.getItemCount() > 0) {
mConversationView.scrollToPosition(mConversationAdapter.getItemCount() - 1);
}
speakText(translatedText, lang);
}
@Override
public void onFailure(Call<TranslationResult> call, Throwable t) {
Log.e(LOG_TAG, "Something went wrong.");
}
};
if (mTranslationPanel.hasJustUsedLeftTranslator()) {
mTranslator.translate(text, leftLanguageCode + "-" + rightLanguageCode, callback);
} else {
mTranslator.translate(text, rightLanguageCode + "-" + leftLanguageCode, callback);
}
}
private void speakText(String text, String language) {
if (mTextToSpeech == null || mTextToSpeech.isSpeaking() || mLocales == null) {
return;
}
String langCode = Utility.getTranslatedLanguage(language);
if (langCode.equals(getString(R.string.lang_code_bg))) {
langCode = getString(R.string.lang_code_ru);
text = Utility.editBulgarianTextForRussianReading(text);
}
Locale locale = Utility.getLocaleFromLangCode(langCode, mLocales);
if (locale == null) {
Log.d(LOG_TAG, "Language not supported by TextToSpeech.");
return;
}
mTextToSpeech.setLanguage(locale);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
mTextToSpeech.speak(text, TextToSpeech.QUEUE_FLUSH, null, null);
} else {
mTextToSpeech.speak(text, TextToSpeech.QUEUE_FLUSH, null);
}
}
@Override
public void onReadyForSpeech(Bundle params) {
Log.d(LOG_TAG, "onReadyforSpeach");
}
@Override
public void onBeginningOfSpeech() {
mTranslationPanel.setAnimationOn(true);
Log.d(LOG_TAG, "onBeginningOfSpeach");
}
@Override
public void onRmsChanged(float rmsdB) {
mTranslationPanel.onRmsChanged(rmsdB);
Log.d(LOG_TAG, "onRmsChanged");
}
@Override
public void onBufferReceived(byte[] buffer) {
Log.d(LOG_TAG, "onBufferReceived");
}
@Override
public void onEndOfSpeech() {
mTranslationPanel.setAnimationOn(false);
Log.d(LOG_TAG, "onEndOfSpeach");
}
@Override
public void onError(int error) {
String errorMessage = getErrorText(error);
Log.d(LOG_TAG, "FAILED " + errorMessage);
mTranslationPanel.setAnimationOn(false);
}
@Override
public void onResults(Bundle results) {
ArrayList<String> matches = results
.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
String text = null;
if (matches != null && !matches.isEmpty()) {
text = matches.get(0);
}
if (text == null) {
return;
}
Log.d(LOG_TAG, text);
translate(text);
}
@Override
public void onPartialResults(Bundle partialResults) {
Log.d(LOG_TAG, "onPartialResults");
}
@Override
public void onEvent(int eventType, Bundle params) {
Log.d(LOG_TAG, "onEvent");
}
public static String getErrorText(int errorCode) {
String message;
switch (errorCode) {
case SpeechRecognizer.ERROR_AUDIO:
message = "Audio recording error";
break;
case SpeechRecognizer.ERROR_CLIENT:
message = "Client side error";
break;
case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
message = "Insufficient permissions";
break;
case SpeechRecognizer.ERROR_NETWORK:
message = "Network error";
break;
case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
message = "Network timeout";
break;
case SpeechRecognizer.ERROR_NO_MATCH:
message = "No match";
break;
case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
message = "RecognitionService busy";
break;
case SpeechRecognizer.ERROR_SERVER:
message = "error from server";
break;
case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
message = "No speech input";
break;
default:
message = "Didn't understand, please try again.";
break;
}
return message;
}
}
I've searched everywhere online for a solution but haven't found one; I'd really appreciate it if someone could help.
Best answer
Try this:
@Override
public void onResults(Bundle results) {
    ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    if (data != null && !data.isEmpty()) {
        mText.setText("results: " + data.get(0));
    }
    // Cancel the finished session first, then start a new one so the next
    // utterance gets the full set of callbacks again (onRmsChanged included).
    sr.cancel();
    sr.startListening(intent);
}