I am learning the iOS speech recognition module by following this tutorial: https://medium.com/ios-os-x-development/speech-recognition-with-swift-in-ios-10-50d5f4e59c48
Hey, I was getting the same error, but now it's working absolutely fine. I hope this code helps you too :).
import UIKit
import Speech

class SpeechVC: UIViewController {

    @IBOutlet weak var slabel: UILabel!
    @IBOutlet weak var sbutton: UIButton!

    let audioEngine = AVAudioEngine()
    let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer()
    let request = SFSpeechAudioBufferRecognitionRequest()
    var recognitionTask: SFSpeechRecognitionTask?
    var isRecording = false

    override func viewDidLoad() {
        super.viewDidLoad()
        self.requestSpeechAuthorization()
    }

    func recordAndRecognizeSpeech() {
        guard let node = audioEngine.inputNode else { return }
        let recordingFormat = node.outputFormat(forBus: 0)
        // Feed microphone buffers into the recognition request
        node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
            self.request.append(buffer)
        }
        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print(error)
            return
        }
        guard let myRecognizer = SFSpeechRecognizer() else { return }
        if !myRecognizer.isAvailable { return }
        recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in
            if let result = result {
                let bestString = result.bestTranscription.formattedString
                self.slabel.text = bestString
                var lastString: String = ""
                // lastString ends up holding the most recently spoken segment
                for segment in result.bestTranscription.segments {
                    let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
                    lastString = bestString.substring(from: indexTo)
                }
            } else if let error = error {
                print(error)
            }
        })
    }

    @IBAction func startAction(_ sender: Any) {
        if isRecording == true {
            audioEngine.stop()
            recognitionTask?.cancel()
            isRecording = false
            sbutton.backgroundColor = UIColor.gray
        } else {
            self.recordAndRecognizeSpeech()
            isRecording = true
            sbutton.backgroundColor = UIColor.red
        }
    }

    func cancelRecording() {
        audioEngine.stop()
        if let node = audioEngine.inputNode {
            node.removeTap(onBus: 0)
        }
        recognitionTask?.cancel()
    }

    func requestSpeechAuthorization() {
        SFSpeechRecognizer.requestAuthorization { authStatus in
            OperationQueue.main.addOperation {
                switch authStatus {
                case .authorized:
                    self.sbutton.isEnabled = true
                case .denied:
                    self.sbutton.isEnabled = false
                    self.slabel.text = "User denied access to speech recognition"
                case .restricted:
                    self.sbutton.isEnabled = false
                    self.slabel.text = "Speech Recognition is restricted on this Device"
                case .notDetermined:
                    self.sbutton.isEnabled = false
                    self.slabel.text = "Speech Recognition not yet authorized"
                }
            }
        }
    }
}
This will prevent two errors: the above-mentioned Code=216 and the 'SFSpeechAudioBufferRecognitionRequest cannot be re-used' error.
Stop recognition with finish, not with cancel, and stop the audio, like so:
// stop recognition
recognitionTask?.finish()
recognitionTask = nil
// stop audio
request.endAudio()
audioEngine.stop()
audioEngine.inputNode.removeTap(onBus: 0) // Remove tap on bus when stopping recording.
P.S. audioEngine.inputNode no longer seems to be an optional value, so I did not use an if let construct.
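In case it helps to see those calls together, here is a minimal sketch of the same lines wrapped into one helper, assuming the audioEngine, request and recognitionTask properties from the code above (the name stopRecording is just illustrative):
func stopRecording() {
    // Mark the end of the audio stream so the request is not re-used
    request.endAudio()
    audioEngine.stop()
    audioEngine.inputNode.removeTap(onBus: 0)
    // finish() lets the recognizer deliver its final result; cancel() would discard it
    recognitionTask?.finish()
    recognitionTask = nil
}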
I had the same problem whilst following the same (excellent) tutorial, even when using the example code on GitHub. To solve it, I had to do two things:
Firstly, add request.endAudio() at the start of the code that stops recording in the startButtonTapped action. This marks the end of the recording. I see you've already done that in your sample code.
Secondly, in the recordAndRecognizeSpeech function, when 'recognitionTask' is started, if no speech is detected then 'result' will be nil and the error case is triggered. So I tested for result != nil before attempting to assign the result.
So, the code for those two functions looks as follows: 1. Updated startButtonTapped:
@IBAction func startButtonTapped(_ sender: UIButton) {
    if isRecording {
        request.endAudio() // Added line to mark end of recording
        audioEngine.stop()
        if let node = audioEngine.inputNode {
            node.removeTap(onBus: 0)
        }
        recognitionTask?.cancel()
        isRecording = false
        startButton.backgroundColor = UIColor.gray
    } else {
        self.recordAndRecognizeSpeech()
        isRecording = true
        startButton.backgroundColor = UIColor.red
    }
}
And 2. Update within recordAndRecognizeSpeech from the recognitionTask = ... line:
recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { (result, error) in
    if result != nil { // check to see if result is empty (i.e. no speech found)
        if let result = result {
            let bestString = result.bestTranscription.formattedString
            self.detectedTextLabel.text = bestString
            var lastString: String = ""
            for segment in result.bestTranscription.segments {
                let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
                lastString = bestString.substring(from: indexTo)
            }
            self.checkForColoursSaid(resultString: lastString)
        } else if let error = error {
            self.sendAlert(message: "There has been a speech recognition error")
            print(error)
        }
    }
})
I hope that helps you.
I had this error because I was running the app on the Simulator. Running on a regular device solves the issue.
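If you want to make that failure mode obvious in code, one option is a compile-time check. This is only a sketch, assuming Swift 4.1 or later (where the targetEnvironment(simulator) condition is available) and the slabel / recordAndRecognizeSpeech names from the first answer; startRecordingIfSupported is just an illustrative name:
func startRecordingIfSupported() {
    #if targetEnvironment(simulator)
    // Speech/microphone input is unreliable in the Simulator, so bail out early
    slabel.text = "Speech recognition needs to run on a real device"
    #else
    recordAndRecognizeSpeech()
    #endif
}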