Example usage for android.speech SpeechRecognizer ERROR_NO_MATCH

List of usage examples for android.speech SpeechRecognizer ERROR_NO_MATCH

Introduction

On this page you can find example usages of android.speech SpeechRecognizer ERROR_NO_MATCH.

Prototype

int ERROR_NO_MATCH

Document

No recognition result matched.
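
A minimal sketch of how this constant is typically checked inside a RecognitionListener.onError callback is shown below. The promptUser and restartListening helpers are hypothetical and only illustrate a common reaction to this error; TAG is assumed to be a logging tag defined elsewhere:

@Override
public void onError(int error) {
    if (error == SpeechRecognizer.ERROR_NO_MATCH) {
        // No recognition result matched: ask the user to repeat and listen again.
        promptUser("Sorry, I didn't catch that. Please try again."); // hypothetical helper
        restartListening();                                          // hypothetical helper
    } else {
        Log.e(TAG, "Speech recognition error, code: " + error);
    }
}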

Usage

From source file:com.chexiaoya.gaodemapdemo.SpeechSearchActivity.java

@Override
public void onError(int error) {
    status = STATUS_None;
    StringBuilder sb = new StringBuilder();
    switch (error) {
    case SpeechRecognizer.ERROR_AUDIO:
        sb.append("Audio recording error");
        break;
    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
        sb.append("No speech input");
        break;
    case SpeechRecognizer.ERROR_CLIENT:
        sb.append("Other client side error");
        break;
    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
        sb.append("Insufficient permissions");
        break;
    case SpeechRecognizer.ERROR_NETWORK:
        sb.append("Network error");
        break;
    case SpeechRecognizer.ERROR_NO_MATCH:
        sb.append("No recognition result matched");
        break;
    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
        sb.append("RecognitionService busy");
        break;
    case SpeechRecognizer.ERROR_SERVER:
        sb.append("Server error");
        break;
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
        sb.append("Network operation timed out");
        break;
    }
    sb.append(":" + error);
    showToast(sb.toString());
}

From source file:conversandroid.MainActivity.java

/**
 * Provides feedback to the user when the ASR encounters an error
 */
@Override
public void processAsrError(int errorCode) {
    changeButtonAppearanceToDefault();

    //Possible bug in Android SpeechRecognizer: NO_MATCH errors arrive even before the ASR
    // has even tried to recognize. We have adopted the solution proposed in:
    // http://stackoverflow.com/questions/31071650/speechrecognizer-throws-onerror-on-the-first-listening
    long duration = System.currentTimeMillis() - startListeningTime;
    if (duration < 500 && errorCode == SpeechRecognizer.ERROR_NO_MATCH) {
        Log.e(LOGTAG, "Doesn't seem like the system tried to listen at all. duration = " + duration
                + "ms. Going to ignore the error");
        stopListening();
    } else {
        String errorMsg = "";
        switch (errorCode) {
        case SpeechRecognizer.ERROR_AUDIO:
            errorMsg = "Audio recording error";
            break;
        case SpeechRecognizer.ERROR_CLIENT:
            errorMsg = "Unknown client side error";
            break;
        case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
            errorMsg = "Insufficient permissions";
            break;
        case SpeechRecognizer.ERROR_NETWORK:
            errorMsg = "Network related error";
            break;
        case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
            errorMsg = "Network operation timed out";
            break;
        case SpeechRecognizer.ERROR_NO_MATCH:
            errorMsg = "No recognition result matched";
            break;
        case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
            errorMsg = "RecognitionService busy";
            break;
        case SpeechRecognizer.ERROR_SERVER:
            errorMsg = "Server sends error status";
            break;
        case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
            errorMsg = "No speech input";
            break;
        default:
            errorMsg = ""; //Another frequent error that is not really due to the ASR, we will ignore it
        }
        if (!errorMsg.isEmpty()) {
            Log.e(LOGTAG, "Error when attempting to listen: " + errorMsg);

            try {
                speak(errorMsg, "EN", ID_PROMPT_INFO);
            } catch (Exception e) {
                Log.e(LOGTAG, "English not available for TTS, default language used instead");
            }
        }
    }

}
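
The duration check used above can be isolated into a small helper. This is only a sketch under the same assumption as the example, namely that startListeningTime is set to System.currentTimeMillis() right before startListening() is called; the helper name and the threshold parameter are ours, not part of the original code:

// Returns true when ERROR_NO_MATCH arrives so soon after startListening()
// that the recognizer most likely never tried to listen at all.
private boolean isSpuriousNoMatch(int errorCode, long minListeningMillis) {
    long duration = System.currentTimeMillis() - startListeningTime;
    return errorCode == SpeechRecognizer.ERROR_NO_MATCH && duration < minListeningMillis;
}

Called as isSpuriousNoMatch(errorCode, 500) at the top of onError, it replaces the inline duration test.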

From source file:com.baidu.android.voicedemo.ActivityTouch.java

@Override
public void onError(int error) {
    StringBuilder sb = new StringBuilder();
    switch (error) {
    case SpeechRecognizer.ERROR_AUDIO:
        sb.append("Audio recording error");
        break;
    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
        sb.append("No speech input");
        break;
    case SpeechRecognizer.ERROR_CLIENT:
        sb.append("Other client side error");
        break;
    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
        sb.append("Insufficient permissions");
        break;
    case SpeechRecognizer.ERROR_NETWORK:
        sb.append("Network error");
        break;
    case SpeechRecognizer.ERROR_NO_MATCH:
        sb.append("No recognition result matched");
        break;
    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
        sb.append("RecognitionService busy");
        break;
    case SpeechRecognizer.ERROR_SERVER:
        sb.append("Server error");
        break;
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
        sb.append("Network operation timed out");
        break;
    }
    sb.append(":" + error);
    print("" + sb.toString());
}

From source file:com.ct.speech.HintReceiver.java

/**
 * Fire an intent to start the speech recognition activity.
 *
 * @param args
 *            Argument array with the following string args: [req
 *            code][number of matches][prompt string][language], passed to
 *            the Google speech recognizer
 */

private void startSpeechRecognitionActivity(JSONArray args) {
    // int reqCode = 42; // Hitchhiker? // global now
    int maxMatches = 2;
    String prompt = "";
    String language = "";
    try {
        if (args.length() > 0) {
            // Request code - passed back to the caller on a successful
            // operation
            String temp = args.getString(0);
            reqCode = Integer.parseInt(temp);
        }
        if (args.length() > 1) {
            // Maximum number of matches, 0 means the recognizer decides
            String temp = args.getString(1);
            maxMatches = Integer.parseInt(temp);
        }
        if (args.length() > 2) {
            // Optional text prompt
            prompt = args.getString(2);
        }
        if (args.length() > 3) {
            // Optional language specified
            language = args.getString(3);
        }
    } catch (Exception e) {
        Log.e(TAG, String.format("startSpeechRecognitionActivity exception: %s", e.toString()));
    }
    final Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra("calling_package", "com.ct.BasicAppFrame");
    // If specific language
    if (!language.equals("")) {
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, language);
    }
    if (maxMatches > 0)
        intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, maxMatches);
    if (!(prompt.length() == 0))
        intent.putExtra(RecognizerIntent.EXTRA_PROMPT, prompt);
    // ctx.startActivityForResult(this, intent, reqCode); //removed to try
    // using recognizer directly
    try {
        this.ctx.runOnUiThread(new Runnable() {
            public void run() {
                final SpeechRecognizer recognizer = SpeechRecognizer.createSpeechRecognizer((Context) ctx);
                RecognitionListener listener = new RecognitionListener() {
                    @Override
                    public void onResults(Bundle results) {
                        //closeRecordedFile();
                        sendBackResults(results);
                        ArrayList<String> voiceResults = results
                                .getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                        if (voiceResults == null) {
                            Log.e(TAG, "No voice results");
                        } else {
                            // Log.d(TAG, "Printing matches: ");
                            for (@SuppressWarnings("unused")
                            String match : voiceResults) {
                                // Log.d(TAG, match);
                            }
                        }
                        recognizer.destroy();
                    }

                    @Override
                    public void onReadyForSpeech(Bundle params) {
                        // Log.d(TAG, "Ready for speech");
                    }

                    @Override
                    public void onError(int error) {
                        Log.d(TAG, "Error listening for speech: " + error);
                        if (error == SpeechRecognizer.ERROR_NO_MATCH) {
                            sendBackResults(NO_MATCH);
                        } else if (error == SpeechRecognizer.ERROR_SPEECH_TIMEOUT) {
                            sendBackResults(NO_INPUT);
                        } else {
                            speechFailure("unknown error");
                        }
                        recognizer.destroy();
                    }

                    @Override
                    public void onBeginningOfSpeech() {
                        // Log.d(TAG, "Speech starting");
                        setStartOfSpeech();
                    }

                    @Override
                    //doesn't fire in Android after Ice Cream Sandwich
                    public void onBufferReceived(byte[] buffer) {
                    }

                    @Override
                    public void onEndOfSpeech() {
                        setEndOfSpeech();
                    }

                    @Override
                    public void onEvent(int eventType, Bundle params) {
                        // TODO Auto-generated method stub

                    }

                    @Override
                    public void onPartialResults(Bundle partialResults) {
                        // TODO Auto-generated method stub

                    }

                    @Override
                    public void onRmsChanged(float rmsdB) {
                        // TODO Auto-generated method stub

                    }
                };
                recognizer.setRecognitionListener(listener);
                Log.d(TAG, "starting speech recognition activity");
                recognizer.startListening(intent);
            }
        });
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
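
The javadoc above documents the positional layout of the args array. As a rough sketch of a call site (not part of the listing; the values are examples only), the array could be assembled with org.json.JSONArray from within the plugin class like this:

JSONArray args = new JSONArray();
args.put("42");              // request code, passed back to the caller on success
args.put("2");               // maximum number of matches; "0" lets the recognizer decide
args.put("Say a command");   // optional prompt shown to the user
args.put("en-US");           // optional language
startSpeechRecognitionActivity(args);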

From source file:atlc.granadaaccessibilityranking.VoiceActivity.java

/********************************************************************************************************
 * This class implements the {@link android.speech.RecognitionListener} interface,
 * thus it implements its methods. However, not all of them were of interest to us:
 * ******************************************************************************************************
 */

@SuppressLint("InlinedApi")
/*
 * (non-Javadoc)
 *
 * Invoked when the ASR provides recognition results
 *
 * @see android.speech.RecognitionListener#onResults(android.os.Bundle)
 */
@Override
public void onResults(Bundle results) {
    if (results != null) {

        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) { //Checks the API level because the confidence scores are supported only from API level 14:
            //http://developer.android.com/reference/android/speech/SpeechRecognizer.html#CONFIDENCE_SCORES
            //Processes the recognition results and their confidences
            processAsrResults(results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION),
                    results.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES));
            //Attention: It is not RecognizerIntent.EXTRA_RESULTS, that is for intents (see the ASRWithIntent app)
        } else {
            //Processes the recognition results and their confidences
            processAsrResults(results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION), null);
        }
    } else
        //Processes recognition errors
        processAsrError(SpeechRecognizer.ERROR_NO_MATCH);
}

From source file:com.baidu.android.voicedemo.ApiActivity.java

@Override
public void onError(int error) {
    status = STATUS_None;
    StringBuilder sb = new StringBuilder();
    switch (error) {
    case SpeechRecognizer.ERROR_AUDIO:
        sb.append("Audio recording error");
        break;
    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
        sb.append("No speech input");
        break;
    case SpeechRecognizer.ERROR_CLIENT:
        sb.append("Other client side error");
        break;
    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
        sb.append("Insufficient permissions");
        break;
    case SpeechRecognizer.ERROR_NETWORK:
        sb.append("Network error");
        break;
    case SpeechRecognizer.ERROR_NO_MATCH:
        sb.append("No recognition result matched");
        break;
    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
        sb.append("RecognitionService busy");
        break;
    case SpeechRecognizer.ERROR_SERVER:
        sb.append("Server error");
        break;
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
        sb.append("Network operation timed out");
        break;
    }
    sb.append(":" + error);
    print("" + sb.toString());
    btn.setText("");
}

From source file:cn.laojing.smarthome.VoiceActivity.java

@Override
public void onError(int error) {
    StringBuilder sb = new StringBuilder();
    switch (error) {
    case SpeechRecognizer.ERROR_AUDIO:
        sb.append("Audio recording error");
        break;
    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
        sb.append("No speech input");
        break;
    case SpeechRecognizer.ERROR_CLIENT:
        sb.append("Other client side error");
        break;
    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
        sb.append("Insufficient permissions");
        break;
    case SpeechRecognizer.ERROR_NETWORK:
        sb.append("Network error");
        break;
    case SpeechRecognizer.ERROR_NO_MATCH:
        sb.append("No recognition result matched");
        break;
    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
        sb.append("RecognitionService busy");
        break;
    case SpeechRecognizer.ERROR_SERVER:
        sb.append("Server error");
        break;
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
        sb.append("Network operation timed out");
        break;
    }
    sb.append(":" + error);
    print("" + sb.toString());
    start();
}

From source file:conversandroid.RichASR.java

@Override
public void onError(final int errorCode) {

    //Possible bug in Android SpeechRecognizer: NO_MATCH errors arrive even before the ASR
    // has even tried to recognize. We have adopted the solution proposed in:
    // http://stackoverflow.com/questions/31071650/speechrecognizer-throws-onerror-on-the-first-listening
    long duration = System.currentTimeMillis() - startListeningTime;
    if (duration < 500 && errorCode == SpeechRecognizer.ERROR_NO_MATCH) {
        Log.e(LOGTAG, "Doesn't seem like the system tried to listen at all. duration = " + duration
                + "ms. Going to ignore the error");
        stopListening();
    } else {
        String errorMsg = "";
        switch (errorCode) {
        case SpeechRecognizer.ERROR_AUDIO:
            errorMsg = "Audio recording error";
            break;
        case SpeechRecognizer.ERROR_CLIENT:
            errorMsg = "Unknown client side error";
            break;
        case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
            errorMsg = "Insufficient permissions";
            break;
        case SpeechRecognizer.ERROR_NETWORK:
            errorMsg = "Network related error";
            break;
        case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
            errorMsg = "Network operation timed out";
            break;
        case SpeechRecognizer.ERROR_NO_MATCH:
            errorMsg = "No recognition result matched";
            break;
        case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
            errorMsg = "RecognitionService busy";
            break;
        case SpeechRecognizer.ERROR_SERVER:
            errorMsg = "Server sends error status";
            break;
        case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
            errorMsg = "No speech input";
            break;
        default:
            errorMsg = "";
        }
        if (errorCode == SpeechRecognizer.ERROR_CLIENT) { // error code 5
            //Log.e(LOGTAG, "Going to ignore the error");
            //Another frequent error that is not really due to the ASR
        } else {
            ((TextView) findViewById(R.id.feedbackTxt)).setText("Error :( " + errorMsg);
            Log.e(LOGTAG, "Error -> " + errorMsg);
            stopListening();
        }
    }
}

From source file:com.todoroo.astrid.activity.AstridActivity.java

@Override
public void onSpeechError(int error) {
    TaskListFragment tlf = getTaskListFragment();
    if (tlf != null) {
        QuickAddBar quickAdd = tlf.quickAddBar;
        if (quickAdd != null) {
            VoiceRecognizer vr = quickAdd.getVoiceRecognizer();
            if (vr != null)
                vr.cancel();
        }
    }

    int errorStr = 0;
    switch (error) {
    case SpeechRecognizer.ERROR_NETWORK:
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
        errorStr = R.string.speech_err_network;
        break;
    case SpeechRecognizer.ERROR_NO_MATCH:
        Toast.makeText(this, R.string.speech_err_no_match, Toast.LENGTH_LONG).show();
        break;
    default:
        errorStr = R.string.speech_err_default;
        break;
    }

    if (errorStr > 0)
        DialogUtilities.okDialog(this, getString(errorStr), null);
}

From source file:cn.jasonlv.siri.activity.MainActivity.java

@Override
public void onError(int error) {
    status = STATUS_None;
    StringBuilder sb = new StringBuilder();
    switch (error) {
    case SpeechRecognizer.ERROR_AUDIO:
        sb.append("Audio recording error");
        break;
    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
        sb.append("No speech input");
        break;
    case SpeechRecognizer.ERROR_CLIENT:
        sb.append("Other client side error");
        break;
    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
        sb.append("Insufficient permissions");
        break;
    case SpeechRecognizer.ERROR_NETWORK:
        sb.append("Network error");
        break;
    case SpeechRecognizer.ERROR_NO_MATCH:
        sb.append("No recognition result matched");
        break;
    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
        sb.append("RecognitionService busy");
        break;
    case SpeechRecognizer.ERROR_SERVER:
        sb.append("Server error");
        break;
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
        sb.append("Network operation timed out");
        break;
    }
    sb.append(":" + error);
    print("" + sb.toString());
    //btn.setText("");
}