I am trying to write an iPhone application which will do some real-time camera image processing. I used the example presented in the AVFoundation docs as a starting point:
Set the sessionPreset of the capture session to AVCaptureSessionPresetLow, as shown in the sample code below. This increases the processing speed, but images from the buffer will be of lower quality. (If you want to pick the preset more defensively, see the fallback sketch at the end of this answer.)
- (void)initCapture {
    // Use the default video camera as the session input.
    AVCaptureDeviceInput *captureInput =
        [AVCaptureDeviceInput deviceInputWithDevice:
            [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]
                                              error:nil];

    AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    captureOutput.alwaysDiscardsLateVideoFrames = YES; // drop frames we can't process in time
    captureOutput.minFrameDuration = CMTimeMake(1, 25); // cap at 25 fps (deprecated in iOS 5; use the connection's videoMinFrameDuration there)

    // Deliver sample buffers on a dedicated serial queue, not the main thread.
    dispatch_queue_t queue = dispatch_queue_create("cameraQueue", NULL);
    [captureOutput setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    // Ask for BGRA frames, which are convenient for CPU-side processing.
    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
    [captureOutput setVideoSettings:videoSettings];

    self.captureSession = [[AVCaptureSession alloc] init];
    [self.captureSession addInput:captureInput];
    [self.captureSession addOutput:captureOutput];

    // sessionPreset: choose an appropriate value to get the desired speed.
    self.captureSession.sessionPreset = AVCaptureSessionPresetLow;

    if (!self.prevLayer) {
        self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
    }
    self.prevLayer.frame = self.view.bounds;
    self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.view.layer addSublayer:self.prevLayer];
}
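The frames themselves arrive in the AVCaptureVideoDataOutputSampleBufferDelegate callback, which the snippet above does not show. Here is a minimal sketch, assuming the 32BGRA pixel format configured in initCapture; the processing step is left as a placeholder:

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Grab the pixel buffer backing this frame and lock it for CPU access.
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    // With kCVPixelFormatType_32BGRA the buffer is a single plane of BGRA bytes.
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width  = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // ... run your per-frame image processing on baseAddress here ...

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}

Note that this callback runs on cameraQueue, so any UIKit work (e.g. updating an image view) must be dispatched back to the main queue. Also, no frames are delivered until you call [self.captureSession startRunning].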
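AVCaptureSessionPresetLow is not the only option; if you want the best quality the device can still process quickly, you can walk down a list of presets and take the first one the session accepts via canSetSessionPreset:. A sketch (the particular preset ordering here is just an illustrative choice):

// Fall back from higher to lower quality until the session accepts a preset.
NSArray *presets = [NSArray arrayWithObjects:AVCaptureSessionPresetMedium,
                                             AVCaptureSessionPresetLow, nil];
for (NSString *preset in presets) {
    if ([self.captureSession canSetSessionPreset:preset]) {
        self.captureSession.sessionPreset = preset;
        break;
    }
}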