[英]TesseractOCR BAD_ACCESS
我從ARKit捕獲一個幀並從中獲取CVPixelBuffer
func session(_ session: ARSession, didUpdate frame: ARFrame) {
    // Only handle one frame at a time: bail out while a previous
    // frame is still being processed.
    guard self.detectionFrame == nil else { return }
    self.detectionFrame = frame
    // Keep a reference to the captured pixel buffer; the stored frame
    // retains it while the background recognition runs.
    let capturedBuffer = frame.capturedImage
    DispatchQueue.global().async {
        self.recognizeText(from: capturedBuffer)
    }
}
在 recognizeText 中,我初始化 Tesseract,並將像素緩衝區轉換為 UIImage 後傳入。
/// Runs Tesseract OCR (Japanese, horizontal + vertical) over an ARKit pixel buffer.
///
/// - Parameter image: The `CVPixelBuffer` captured from `ARFrame.capturedImage`.
///
/// NOTE: `UIImage(ciImage:)` produces an image whose `cgImage` is nil, so
/// Tesseract's `pixForImage:` receives a NULL `CGImage` data provider and
/// crashes with EXC_BAD_ACCESS. We must explicitly render the `CIImage`
/// into a `CGImage` via a `CIContext` before handing it to Tesseract.
func recognizeText(from image: CVPixelBuffer) {
    // 1: Create the engine for Japanese plus vertical Japanese.
    if let tesseract = MGTesseract(language: "jpn+jpn_vert") {
        // 2: Combined legacy + cube engine mode.
        tesseract.engineMode = .tesseractCubeCombined
        // 3: Let Tesseract detect the page layout automatically.
        tesseract.pageSegmentationMode = .auto
        // 4: Render the pixel buffer into a CGImage-backed UIImage.
        // The render rect must match the buffer's pixel dimensions.
        let ciImage = CIImage(cvPixelBuffer: image)
        let ciContext = CIContext(options: nil)
        let extent = CGRect(x: 0,
                            y: 0,
                            width: CVPixelBufferGetWidth(image),
                            height: CVPixelBufferGetHeight(image))
        guard let cgImage = ciContext.createCGImage(ciImage, from: extent) else {
            // Rendering failed (e.g. unsupported pixel format); nothing to OCR.
            return
        }
        tesseract.image = UIImage(cgImage: cgImage)
        // 5: Perform recognition (synchronous, so keep this off the main thread).
        tesseract.recognize()
        // 6: Print whatever text was recognized.
        let text = tesseract.recognizedText
        print(text ?? "")
    }
}
這個結果總是在
Thread 15: EXC_BAD_ACCESS (code=1, address=0x0)
在
// Tesseract-OCR-iOS internal conversion from UIImage to Leptonica's Pix.
// Crash analysis: when the UIImage was built with UIImage(ciImage:), its
// CGImage property is nil. CGImageGetDataProvider(NULL) then returns NULL,
// CGDataProviderCopyData(NULL) returns NULL, and CFDataGetBytePtr(NULL)
// dereferences a null pointer -> EXC_BAD_ACCESS (address=0x0).
- (Pix *)pixForImage:(UIImage *)image
{
int width = image.size.width;
int height = image.size.height;
// nil for CIImage-backed UIImages — this is the root cause of the crash.
CGImage *cgImage = image.CGImage;
CFDataRef imageData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
const UInt8 *pixels = CFDataGetBytePtr(imageData); <<< EXC_BAD_ACCESS
size_t bitsPerPixel = CGImageGetBitsPerPixel(cgImage);
size_t bytesPerPixel = bitsPerPixel / 8;
size_t bytesPerRow = CGImageGetBytesPerRow(cgImage);
我究竟做錯了什么?
找到了缺失的部分:要將緩衝區轉換為 UIImage,您需要通過 CIContext 渲染,並提供緩衝區的實際尺寸。
// Render the pixel buffer through a CIContext so the resulting UIImage
// is CGImage-backed (UIImage(ciImage:) alone leaves cgImage nil, which
// is what crashed Tesseract's pixForImage:).
let ciImage = CIImage(cvPixelBuffer: pixBuffer)
let ciContext = CIContext(options: nil)
// The render rect must use the buffer's pixel dimensions, not a view size.
if let videoImage = ciContext.createCGImage(ciImage, from: CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixBuffer), height: CVPixelBufferGetHeight(pixBuffer))) {
self.prcessedImage = UIImage(cgImage: videoImage )
tesseract.image = self.prcessedImage
// 5: Perform recognition.
tesseract.recognize()
// 6: Print the recognized text.
let text = tesseract.recognizedText
print(text ?? "")
}
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.