How to Navigate a Google Glass GDK Immersion Application using Voice Command only?

北恋 · 2020-12-09 19:56

How would I go about coding a voice trigger to navigate Google Glass Cards?

This is how I see it happening:

1) \"Ok Glass, Start My Program\"

2) Applicatio         
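
For step 1, the GDK launches an immersion through a voice trigger declared in the manifest. A minimal sketch, assuming the phrase "Start My Program" and a trigger resource named res/xml/voice_trigger.xml; the launcher Activity in AndroidManifest.xml would get:

    <intent-filter>
        <action android:name="com.google.android.glass.action.VOICE_TRIGGER" />
    </intent-filter>
    <meta-data android:name="com.google.android.glass.VoiceTrigger"
               android:resource="@xml/voice_trigger" />

And res/xml/voice_trigger.xml:

    <trigger keyword="Start My Program" />

Custom phrases that are not on Google's approved command list also need the com.google.android.glass.permission.DEVELOPMENT permission during development.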


        
5 Answers

死守一世寂寞 · 2020-12-09 20:02

    Define the following in your onCreate() method:

        // Fields on the Activity, so the listener can reuse them:
        private AudioManager mAudioManager;
        private SpeechRecognizer sr;
        private Intent intent;

        // In onCreate():
        mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);

        sr = SpeechRecognizer.createSpeechRecognizer(context);
        sr.setRecognitionListener(new listener(context));

        intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, context.getPackageName());
        sr.startListening(intent);
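
    SpeechRecognizer also needs the record-audio permission, so declare it in AndroidManifest.xml:

        <uses-permission android:name="android.permission.RECORD_AUDIO" />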
    

    Then add this listener class inside your Activity:

    class listener implements RecognitionListener
    {
        Context context1;

        public listener(Context context)
        {
            context1 = context;
        }

        public void onReadyForSpeech(Bundle params) { }

        public void onBeginningOfSpeech() { }

        public void onRmsChanged(float rmsdB) { }

        public void onBufferReceived(byte[] buffer) { }

        public void onEndOfSpeech()
        {
            // Restart recognition immediately so the app listens continuously.
            sr.startListening(intent);
        }

        public void onError(int error)
        {
            // Possible codes: 1 network timeout, 2 network, 3 audio recording,
            // 4 server, 5 other client-side, 6 no speech input,
            // 7 no recognition result matched, 8 RecognitionService busy,
            // 9 insufficient permissions.
            // Whatever the cause, restart listening so recognition never stops.
            sr.startListening(intent);
        }

        public void onResults(Bundle results)
        {
            ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            if (data != null && !data.isEmpty())
            {
                // data.get(0) is the most likely transcription; match it
                // against your voice commands here.
                Log.v("SpeechDemo", "result: " + data.get(0));
            }
        }

        public void onPartialResults(Bundle partialResults) { }

        public void onEvent(int eventType, Bundle params) { }
    }
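
    To actually navigate cards from the recognized text, match the top result against a few keywords. A minimal sketch, assuming the listener is an inner class of the immersion Activity, that the cards are hosted in a hypothetical ViewFlipper named mFlipper (a GDK CardScrollView would work similarly), and a hypothetical handleCommand() helper called with data.get(0) from onResults():

        private void handleCommand(String spoken)
        {
            String cmd = spoken.toLowerCase().trim();
            if (cmd.contains("next"))
            {
                mFlipper.showNext();        // advance to the next card
            }
            else if (cmd.contains("previous") || cmd.contains("back"))
            {
                mFlipper.showPrevious();    // go back one card
            }
            else if (cmd.contains("stop"))
            {
                sr.stopListening();         // stop recognition
                finish();                   // and leave the immersion
            }
            // Anything else is ignored; onEndOfSpeech() restarts listening.
        }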
    
