Here is my code:

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Create a UIImage from the sample buffer data
    UIImage *image = [self imageFromSampleBuffer:sampleBuffer];
    dispatch_async(dispatch_get_main_queue(), ^{
        [[self imgView] setImage:image];
    });
}
// Create a UIImage from sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock the base address of the pixel buffer.
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    // Get the number of bytes per row for the pixel buffer.
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    // Get the pixel buffer width and height.
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    // Create a device-dependent RGB color space.
    static CGColorSpaceRef colorSpace = NULL;
    if (colorSpace == NULL) {
        colorSpace = CGColorSpaceCreateDeviceRGB();
        if (colorSpace == NULL) {
            // Handle the error appropriately.
            return nil;
        }
    }
    // Copy the pixel data out of the pixel buffer.
    uint8_t *baseAddress = malloc(bytesPerRow * height);
    memcpy(baseAddress, CVPixelBufferGetBaseAddress(imageBuffer), bytesPerRow * height);
    // Get the data size for contiguous planes of the pixel buffer.
    size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);
    // Create a Quartz direct-access data provider that uses data we supply.
    CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, baseAddress, bufferSize, NULL);
    // Create a bitmap image from data supplied by the data provider.
    CGImageRef cgImage = CGImageCreate(width, height, 8, 32, bytesPerRow,
                                       colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little,
                                       dataProvider, NULL, true, kCGRenderingIntentDefault);
    CGDataProviderRelease(dataProvider);
    // Create and return an image object to represent the Quartz image.
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    free(baseAddress);
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    return image;
}
Still I am getting no image in my image view. Can anyone help me find the mistake?
Thanks in advance.
This is the code that made it work for me:
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // Create a UIImage from the sample buffer data
    UIImage *img = [self imageFromSampleBuffer:sampleBuffer];
    dispatch_async(dispatch_get_main_queue(), ^{
        self.imgView.image = img;
    });
}
// Create a UIImage from sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    // Get a CMSampleBuffer's Core Video image buffer for the media data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock the base address of the pixel buffer
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    // Get the number of bytes per row for the pixel buffer
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    // Get the pixel buffer width and height
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    // Copy the pixel data so we own the bytes the bitmap context draws from
    uint8_t *baseAddress = (uint8_t *)malloc(bytesPerRow * height);
    memcpy(baseAddress, CVPixelBufferGetBaseAddress(imageBuffer), bytesPerRow * height);
    // size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);
    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a bitmap graphics context with the sample buffer data.
    // The context draws into a bitmap `width' pixels wide and `height'
    // pixels high, with the per-pixel components given by the color space.
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);
    // Create a Quartz image from the pixel data in the bitmap graphics context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    // Free up the context and color space
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    // Create an image object from the Quartz image, rotated for portrait display
    UIImage *image = [UIImage imageWithCGImage:quartzImage scale:1.0 orientation:UIImageOrientationRight];
    free(baseAddress);
    // Release the Quartz image
    CGImageRelease(quartzImage);
    return image;
}
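Note that this conversion only produces correct colors if the capture output hands you 32-bit BGRA pixel buffers, which is not the default. A minimal configuration sketch, assuming a capture session and output you create yourself (the names session and videoOutput are placeholders, not from the code above):

AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
// The bitmap context above expects 32-bit little-endian BGRA pixels,
// so request that format explicitly rather than a biplanar YUV default.
videoOutput.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
// Deliver frames on a background serial queue, never the main queue.
dispatch_queue_t videoQueue = dispatch_queue_create("videoQueue", DISPATCH_QUEUE_SERIAL);
[videoOutput setSampleBufferDelegate:self queue:videoQueue];
if ([session canAddOutput:videoOutput]) {
    [session addOutput:videoOutput];
}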
Try this:
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace,
                                                    kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);
    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);
    // Release the Quartz image and unlock the pixel buffer before returning
    UIImage *image = [UIImage imageWithCGImage:newImage];
    CGImageRelease(newImage);
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    return image;
}
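Like the snippets above, this assumes the buffer really is BGRA. If you want to be defensive, a small guard you could add right after locking the base address (a sketch, using the real CVPixelBufferGetPixelFormatType call):

OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
if (format != kCVPixelFormatType_32BGRA) {
    // The bitmap-context flags above only match a BGRA layout; a biplanar
    // YUV buffer (420v/420f) would need a conversion step instead.
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    return nil;
}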
Use a GLKView to speed up rendering; then, insert this code in your delegate method:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CMFormatDescriptionRef formatDesc = CMSampleBufferGetFormatDescription(sampleBuffer);
    // update the video dimensions information
    _currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDesc);
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // If you want to apply filters to your image before you display it...
    CIImage *tempImage = [CIImage imageWithCVPixelBuffer:(CVPixelBufferRef)imageBuffer options:nil];
    CIFilter *filter = [CIFilter filterWithName:@"CIExposureAdjust" keysAndValues:@"inputImage", tempImage, nil];
    [filter setValue:@(self.tag) forKey:@"inputEV"];
    CIFilter *filter2 = [CIFilter filterWithName:@"CILinearToSRGBToneCurve" keysAndValues:@"inputImage", [filter valueForKey:@"outputImage"], nil];
    CIFilter *filter3 = [CIFilter filterWithName:@"CIGammaAdjust" keysAndValues:@"inputImage", [filter2 valueForKey:@"outputImage"], nil];
    [filter3 setValue:@(self.tag) forKey:@"inputPower"];
    CIImage *sourceImage = [filter3 valueForKey:@"outputImage"];
    CGRect sourceExtent = sourceImage.extent;
    CGFloat sourceAspect = sourceExtent.size.width / sourceExtent.size.height;
    CGFloat previewAspect = _videoPreviewViewBounds.size.width / _videoPreviewViewBounds.size.height;
    // we want to maintain the aspect ratio of the screen, so we clip the video image
    CGRect drawRect = sourceExtent;
    if (sourceAspect > previewAspect)
    {
        // use the full height of the video image, and center-crop the width
        drawRect.origin.x += (drawRect.size.width - drawRect.size.height * previewAspect) / 2.0;
        drawRect.size.width = drawRect.size.height * previewAspect;
    }
    else
    {
        // use the full width of the video image, and center-crop the height
        drawRect.origin.y += (drawRect.size.height - drawRect.size.width / previewAspect) / 2.0;
        drawRect.size.height = drawRect.size.width / previewAspect;
    }
    [_videoPreviewView bindDrawable];
    if (_eaglContext != [EAGLContext currentContext])
        [EAGLContext setCurrentContext:_eaglContext];
    // clear the GLKView to grey
    glClearColor(0.5, 0.5, 0.5, 1.0);
    glClear(GL_COLOR_BUFFER_BIT);
    // set the blend mode to "source over" so that CI will use it
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    [_ciContext drawImage:sourceImage inRect:_videoPreviewViewBounds fromRect:drawRect];
    [_videoPreviewView display];
}
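This snippet leans on several ivars it never declares (_eaglContext, _ciContext, _videoPreviewView, _videoPreviewViewBounds). A plausible setup sketch for viewDidLoad, modeled on the usual GLKView + Core Image pattern rather than taken from the answer itself:

_eaglContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
_videoPreviewView = [[GLKView alloc] initWithFrame:self.view.bounds context:_eaglContext];
// We drive drawing from the capture callback, so disable implicit redraws.
_videoPreviewView.enableSetNeedsDisplay = NO;
[self.view addSubview:_videoPreviewView];
// A CIContext bound to the GL context renders filters straight to the screen.
_ciContext = [CIContext contextWithEAGLContext:_eaglContext
                                       options:@{ kCIContextWorkingColorSpace : [NSNull null] }];
// bindDrawable creates the framebuffer; record its size in pixels.
[_videoPreviewView bindDrawable];
_videoPreviewViewBounds = CGRectMake(0, 0,
                                     _videoPreviewView.drawableWidth,
                                     _videoPreviewView.drawableHeight);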
You can always convert the CIImage to a UIImage easily, but why would you? Rendering the image into an EAGLContext gives you GPU-accelerated display; I can't think of a reason you'd need a UIImage unless that's simply the route you already know.
Note that this code could be reduced to just a few lines, but anyone working with sample buffers from a video file almost always needs a way to correctly orient and color the frames, and this covers both. (You're likely to find it difficult to get a UIImage generated from a sample buffer to orient itself correctly.)
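If you do need a UIImage occasionally (to save a still, say), a sketch of the conversion using the same _ciContext (the rotation to UIImageOrientationRight mirrors the earlier answers and is an assumption about your camera setup):

CGImageRef cgImage = [_ciContext createCGImage:sourceImage fromRect:sourceImage.extent];
// Sample buffers arrive in landscape; rotate for portrait display.
UIImage *still = [UIImage imageWithCGImage:cgImage scale:1.0 orientation:UIImageOrientationRight];
CGImageRelease(cgImage);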
Try to use

UIImage *image = [[UIImage alloc] initWithCGImage:cgImage];

instead of

UIImage *image = [UIImage imageWithCGImage:cgImage];