Swift - AVCaptureSession, how to captureOutput
I'm new to Swift and am trying to implement text recognition for a live camera feed I created with AVCaptureSession.
The live view is displayed as a sublayer of a UIView. For the past few hours I have been trying to capture its output into a CMSampleBuffer. I have searched the web, but the topic seems to be rarely covered.
Correct me if I'm wrong, but I think captureOutput would be the function to use here - unfortunately, I have no idea how to fill it in.
My CameraViewController:
import UIKit
import AVFoundation

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var toolbar: UIToolbar!
    @IBOutlet weak var cameraView: UIView!

    var session: AVCaptureSession?
    var device: AVCaptureDevice?
    var input: AVCaptureDeviceInput?
    var output: AVCaptureMetadataOutput?
    var prevLayer: AVCaptureVideoPreviewLayer?
    var videoOutput: AVCaptureVideoDataOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        prevLayer?.frame.size = cameraView.frame.size
        createSession()
        self.toolbar.setBackgroundImage(UIImage(),
                                        forToolbarPosition: .any,
                                        barMetrics: .default)
        self.toolbar.setShadowImage(UIImage(), forToolbarPosition: .any)
    }

    @IBAction func goBack(_ sender: Any) {
        performSegue(withIdentifier: "goBackToAdd", sender: self)
    }

    func createSession() {
        session = AVCaptureSession()
        device = AVCaptureDevice.default(for: AVMediaType.video)
        do {
            input = try AVCaptureDeviceInput(device: device!)
        } catch {
            print(error)
        }
        if let input = input {
            session?.addInput(input)
        }
        prevLayer = AVCaptureVideoPreviewLayer(session: session!)
        prevLayer?.frame.size = cameraView.frame.size
        prevLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        prevLayer?.connection?.videoOrientation = transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
        cameraView.layer.addSublayer(prevLayer!)
        session?.startRunning()
    }

    func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTelephotoCamera, .builtInTrueDepthCamera, .builtInWideAngleCamera], mediaType: .video, position: position)
        if let device = deviceDiscoverySession.devices.first {
            return device
        }
        return nil
    }

    override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
        coordinator.animate(alongsideTransition: { (context) -> Void in
            self.prevLayer?.connection?.videoOrientation = self.transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
            self.prevLayer?.frame.size = self.cameraView.frame.size
        }, completion: { (context) -> Void in
        })
        super.viewWillTransition(to: size, with: coordinator)
    }

    func transformOrientation(orientation: UIInterfaceOrientation) -> AVCaptureVideoOrientation {
        switch orientation {
        case .landscapeLeft:
            return .landscapeLeft
        case .landscapeRight:
            return .landscapeRight
        case .portraitUpsideDown:
            return .portraitUpsideDown
        default:
            return .portrait
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        <#code#>
    }
}
Get a UIImage in captureOutput (the delegate method and helper below are Objective-C):
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    UIImage *image = [self imageFromSampleBuffer:sampleBuffer];
    dispatch_async(dispatch_get_main_queue(), ^{
        //< Add your code here that uses the image >
    });
}
// Create a UIImage from sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    NSLog(@"imageFromSampleBuffer: called");

    // Get a CMSampleBuffer's Core Video image buffer for the media data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

    // Lock the base address of the pixel buffer
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    // Get the base address of the pixel buffer
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);

    // Get the number of bytes per row for the pixel buffer
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);

    // Get the pixel buffer width and height
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Create a bitmap graphics context with the sample buffer data
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
        bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);

    // Create a Quartz image from the pixel data in the bitmap graphics context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);

    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    // Free up the context and color space
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    // Create an image object from the Quartz image
    UIImage *image = [UIImage imageWithCGImage:quartzImage];

    // Release the Quartz image
    CGImageRelease(quartzImage);

    return image;
}
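Since the question is in Swift, a minimal Swift sketch of the same helper could look like this. This is my own translation, not part of the original answer; it goes through CIImage/CIContext instead of building a CGBitmapContext by hand, and assumes the video output delivers BGRA pixel buffers:

// Swift sketch of the helper above: convert a CMSampleBuffer to a UIImage.
// Assumes the video data output is configured for BGRA pixel buffers.
func image(from sampleBuffer: CMSampleBuffer) -> UIImage? {
    // Get the Core Video image buffer for the media data
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return nil
    }
    let ciImage = CIImage(cvImageBuffer: imageBuffer)
    // In production, reuse a single CIContext; creating one per frame is expensive
    let context = CIContext()
    // Render the CIImage into a CGImage covering the full frame
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}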
It's quite simple: first you need to create the capture output and set its delegate to an object whose class conforms to AVCaptureVideoDataOutputSampleBufferDelegate. In that class, just implement the method below (a sketch of the session wiring follows it):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    let image = CIImage(cvImageBuffer: imageBuffer)
    // ... render the image to a custom layer or write to an AVAsset as you want
}
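For completeness, here is a minimal sketch of the wiring the question's createSession() is missing: the AVCaptureVideoDataOutput has to be added to the session and its sample buffer delegate set, otherwise captureOutput is never called. This method is assumed to live in CameraViewController; the queue label and BGRA pixel format are illustrative assumptions, not part of the original answer:

// Minimal sketch: configure a video data output and hook up the delegate.
// Assumed to be a method of CameraViewController (which conforms to
// AVCaptureVideoDataOutputSampleBufferDelegate); queue label and pixel
// format are illustrative choices.
func addVideoOutput() {
    let videoOutput = AVCaptureVideoDataOutput()
    // Ask for BGRA frames, which are straightforward to turn into CIImage/UIImage
    videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    // Drop late frames rather than queueing them up behind a slow delegate
    videoOutput.alwaysDiscardsLateVideoFrames = true
    // Deliver sample buffers to self on a dedicated serial queue
    videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    if session?.canAddOutput(videoOutput) == true {
        session?.addOutput(videoOutput)
    }
    self.videoOutput = videoOutput
}

Calling this from createSession() after the input is added (and before startRunning()) should be enough for frames to start arriving in captureOutput while the preview layer keeps rendering.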