[英]ios OpenCV increases the image size
下面是將 UIImage 轉換為 cv::Mat 的代碼：
// Converts a UIImage into a 4-channel cv::Mat (CV_8UC4), compensating for the
// orientation flag stored on the UIImage so the returned Mat is upright.
//
// NOTE(review): only Left/Right (90°) orientations are corrected via
// transpose + flip; UIImageOrientationDown and the mirrored variants are not
// handled and will come out rotated/mirrored — confirm whether the app can
// encounter those.
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image {
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);

    // For 90°-rotated images, swap width/height so the Mat is allocated with
    // the upright dimensions.
    BOOL isRotated90 = (image.imageOrientation == UIImageOrientationLeft ||
                        image.imageOrientation == UIImageOrientationRight);
    CGFloat cols = isRotated90 ? image.size.height : image.size.width;
    CGFloat rows = isRotated90 ? image.size.width : image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,    // Pointer to backing data
                                                    cols,          // Width of bitmap
                                                    rows,          // Height of bitmap
                                                    8,             // Bits per component
                                                    cvMat.step[0], // Bytes per row
                                                    colorSpace,    // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault);
    // CGBitmapContextCreate returns NULL for colorspace/format combinations it
    // cannot render into (e.g. a grayscale or indexed source colorspace with a
    // 4-channel buffer). Guard it rather than drawing into a NULL context.
    // The Mat's pixels are uninitialized in this failure path.
    if (contextRef == NULL) {
        NSLog(@"cvMatFromUIImage: CGBitmapContextCreate failed");
        return cvMat;
    }

    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    if (!isRotated90) {
        return cvMat;
    }

    // Rotate 90°: transpose, then mirror horizontally. (The original code ran
    // the transpose unconditionally and threw the result away for unrotated
    // images; doing it only when needed is behavior-identical but cheaper.)
    cv::Mat uprightMat;
    cv::transpose(cvMat, uprightMat);
    cvMat.release();
    cv::flip(uprightMat, uprightMat, 1);
    return uprightMat;
}
以及將 cv::Mat 轉換回 UIImage 的代碼：
// Converts a cv::Mat back into a UIImage. Supports single-channel (grayscale)
// and 4-channel (RGBA-style) mats, i.e. the formats produced elsewhere in this
// helper.
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat {
    // CGImageCreate below reads rows of cvMat.step[0] bytes, but the NSData
    // length is elemSize() * total() — those disagree for a non-continuous Mat
    // (e.g. a ROI view), producing a sheared image or out-of-bounds reads.
    // Make the data continuous first.
    if (!cvMat.isContinuous()) {
        cvMat = cvMat.clone();
    }

    NSData *data = [NSData dataWithBytes:cvMat.data
                                  length:cvMat.elemSize() * cvMat.total()];

    CGColorSpaceRef colorSpace;
    CGBitmapInfo bitmapInfo;
    if (cvMat.elemSize() == 1) {
        // Single-channel, 8 bpp: plain grayscale, no alpha byte.
        colorSpace = CGColorSpaceCreateDeviceGray();
        bitmapInfo = kCGImageAlphaNone | kCGBitmapByteOrderDefault;
    } else {
        // 4-channel, 32 bpp: kCGImageAlphaNone is an invalid combination for
        // 32-bpp RGB and corrupts the output — the fourth byte must be
        // declared (here: present but ignored), matching the
        // kCGImageAlphaNoneSkipLast layout used when the Mat was created.
        colorSpace = CGColorSpaceCreateDeviceRGB();
        bitmapInfo = kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault;
    }

    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(cvMat.cols,            // Width
                                        cvMat.rows,            // Height
                                        8,                     // Bits per component
                                        8 * cvMat.elemSize(),  // Bits per pixel
                                        cvMat.step[0],         // Bytes per row
                                        colorSpace,            // Colorspace
                                        bitmapInfo,            // Bitmap info flags
                                        provider,              // CGDataProviderRef
                                        NULL,                  // Decode
                                        false,                 // Should interpolate
                                        kCGRenderingIntentDefault); // Intent
    UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return image;
}
經過一些預處理后,我將1080 * 1920(1.5 mb)的圖像轉換為cv::Mat
我將其轉換為UIImage
,這使我得到的圖像尺寸為2517 * 1527(6 mb)
即使在圖像處理后,我也不想增加圖像尺寸。 請指導我我做錯了什么
// Crop operation: un-distort the detected quad into an axis-aligned rectangle.
// Output size is fixed at maxWidth x maxHeight — this, not the conversion
// helpers, is what determines the size of the resulting image.
cv::Mat undistorted = cv::Mat( cvSize(maxWidth,maxHeight), CV_8UC4);
cv::Mat original = [MMOpenCVHelper cvMatFromUIImage:_adjustedImage];
// src/dst are presumably the 4 corner points of the detected quad and of the
// output rectangle — defined elsewhere; TODO confirm their order matches.
cv::warpPerspective(original, undistorted, cv::getPerspectiveTransform(src, dst), cvSize(maxWidth, maxHeight));
您的代碼沒有調整圖像大小。 這一定是預處理的意外副作用,在此不做詳細介紹。
// Round-trip sanity check: UIImage -> cv::Mat -> UIImage, logging the
// dimensions at each stage to show the conversions preserve size.
UIImage *sourceImage = self.testImage;
NSLog(@"original UIImage: %.0f %.0f", sourceImage.size.width, sourceImage.size.height);

cv::Mat mat = [self cvMatFromUIImage:sourceImage];
NSLog(@"cv matImage: %d %d", mat.cols, mat.rows);

UIImage *roundTripped = [self UIImageFromCVMat:mat];
NSLog(@"new UIImage: %.0f %.0f", roundTripped.size.width, roundTripped.size.height);
original UIImage: 720 960 cv matImage: 720 960 new UIImage: 720 960
編輯
在擴展問題之后,似乎warpPerspective
的輸出大小為cvSize(maxWidth,maxHeight)
-這就是您將得到的大小。 如果要結束輸入的大小,可以在轉換回UIImage
之前先進行resize()
。 或者簡單地將輸出mat
設置為與輸入相同的尺寸。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.