
ios - uiimage array to video image orientation and scale

I'm creating a video from an array of UIImages.

I've combined and modified code from several Stack Overflow posts to get this far.

I'll include the file I'm currently using below.

Right now the images come out wrong, and I think it's because they're converted to CGImage, which doesn't retain orientation?
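For example, a quick check (with a hypothetical asset name) seems to confirm that the orientation metadata is not carried over to the CGImage:

import UIKit

// Sketch: "cameraShot" is a hypothetical asset name.
let photo = UIImage(named: "cameraShot")!
print(photo.imageOrientation == .Up)               // often false for camera captures
let cg = photo.CGImage!                            // raw pixels only - orientation metadata is gone
print(CGImageGetWidth(cg), CGImageGetHeight(cg))   // sensor-oriented dimensions, not display-oriented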

Also, how do I handle scaling when the user takes a photo in landscape?

Basically, I want each image to appear in the video the way it looked when it was taken.

The first two images were shot in portrait and come out rotated 90 degrees to the left, and the last one was shot in landscape.
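For the scaling, I assume I need something like the aspect-fit math in my old, commented-out fill method below - a minimal sketch of the idea, assuming a 1920x1080 output and a hypothetical portrait photo size:

import UIKit

let output = CGSizeMake(1920, 1080)
let photoSize = CGSizeMake(3024, 4032)  // hypothetical portrait photo
// Aspect-fit: scale by the smaller ratio so the whole photo fits.
let ratio = min(output.width / photoSize.width, output.height / photoSize.height)
let scaled = CGSizeMake(photoSize.width * ratio, photoSize.height * ratio)
// Center the scaled rect; the leftover area becomes letterboxing.
let origin = CGPointMake((output.width - scaled.width) / 2, (output.height - scaled.height) / 2)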

Thanks.

[screenshots of the resulting video frames omitted]

Class:

import AVFoundation
import UIKit

let kErrorDomain = "TimeLapseBuilder"
let kFailedToStartAssetWriterError = 0
let kFailedToAppendPixelBufferError = 1

public class TimeLapseBuilder: NSObject {
  var photos: [UIImage]
  var videoWriter: AVAssetWriter?
  var outputSize = CGSizeMake(1920, 1080)

  public init(photos: [UIImage]) {
    self.photos = photos

    super.init()
  }

  public func build(outputSize outputSize: CGSize, progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {

    self.outputSize = outputSize
    var error: NSError?

    let startTime = NSDate.timeIntervalSinceReferenceDate()

    let fileManager = NSFileManager.defaultManager()
    let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
    guard let documentDirectory: NSURL = urls.first else {
      fatalError("documentDir Error")
    }

    let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")

    if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
      do {
        try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
      }catch{
        fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
      }
    }

    guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else{
      fatalError("AVAssetWriter error")
    }

    let outputSettings = [
      AVVideoCodecKey  : AVVideoCodecH264,
      AVVideoWidthKey  : NSNumber(float: Float(outputSize.width)),
      AVVideoHeightKey : NSNumber(float: Float(outputSize.height)),
    ]

    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
      fatalError("Negative : Can't apply the Output settings...")
    }

    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)

    let sourcePixelBufferAttributesDictionary = [
      kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
      kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)),
      kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height)),
    ]

    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
      assetWriterInput: videoWriterInput,
      sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
    )

    assert(videoWriter.canAddInput(videoWriterInput))
    videoWriter.addInput(videoWriterInput)

    if videoWriter.startWriting() {
      videoWriter.startSessionAtSourceTime(kCMTimeZero)
      assert(pixelBufferAdaptor.pixelBufferPool != nil)

      let media_queue = dispatch_queue_create("mediaInputQueue", nil)

      videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
        let fps: Int32 = 1
        let frameDuration = CMTimeMake(1, fps)
        let currentProgress = NSProgress(totalUnitCount: Int64(self.photos.count))

        var frameCount: Int64 = 0

        while !self.photos.isEmpty {
          if videoWriterInput.readyForMoreMediaData {
            let nextPhoto = self.photos.removeAtIndex(0)
            let lastFrameTime = CMTimeMake(frameCount, fps)
            let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)

            if !self.appendPixelBufferForImage(nextPhoto, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
              error = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
                userInfo: [
                  "description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
                  "rawError": videoWriter.error ?? "(none)"
                ])

              break
            }

            frameCount += 1

            currentProgress.completedUnitCount = frameCount
            progress(currentProgress)
          }
        }

        let endTime = NSDate.timeIntervalSinceReferenceDate()
        let elapsedTime: NSTimeInterval = endTime - startTime

        print("rendering time \(self.stringFromTimeInterval(elapsedTime))")


        videoWriterInput.markAsFinished()
        videoWriter.finishWritingWithCompletionHandler { () -> Void in
          if error == nil {
            success(videoOutputURL)
          }
        }
      })
    } else {
      error = NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
        userInfo: ["description": "AVAssetWriter failed to start writing"]
      )
    }

    if let error = error {
      failure(error)
    }
  }

  public func appendPixelBufferForImage(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
    var appendSucceeded = true

    autoreleasepool {

        var pixelBuffer: CVPixelBuffer? = nil
        let options: [NSObject : AnyObject] = [
            kCVPixelBufferCGImageCompatibilityKey : Int(true),
            kCVPixelBufferCGBitmapContextCompatibilityKey : Int(true)
        ]

        let status: CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, options as CFDictionaryRef, &pixelBuffer)

        if let pixelBuffer = pixelBuffer where status == kCVReturnSuccess {
          print("scale \(image.scale)")
          print("width \(image.size.width)")
          print("height \(image.size.height)")

          pixelBufferFromImage(image.CGImage!, pxbuffer: pixelBuffer, andSize: image.size)

          appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)

        } else {
          NSLog("error: Failed to allocate pixel buffer from pool")
        }

    }

    return appendSucceeded
  }

    func pixelBufferFromImage(image: CGImageRef, pxbuffer: CVPixelBuffer, andSize size: CGSize){

        CVPixelBufferLockBaseAddress(pxbuffer, 0)
        let pxdata = CVPixelBufferGetBaseAddress(pxbuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()

        let context = CGBitmapContextCreate(pxdata, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(pxbuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

        CGContextConcatCTM(context, CGAffineTransformMakeRotation(0))  // note: a 0-radian rotation is a no-op, so no orientation correction happens here

        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image)

        CVPixelBufferUnlockBaseAddress(pxbuffer, 0)
    }

//    //Old fillpixel method
//  func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBuffer, contentMode:UIViewContentMode){
//
//    CVPixelBufferLockBaseAddress(pixelBuffer, 0)
////    
//    let data = CVPixelBufferGetBaseAddress(pixelBuffer)
//    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
//    let context = CGBitmapContextCreate(data, Int(self.outputSize.width), Int(self.outputSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
//
//    CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.outputSize.width), CGFloat(self.outputSize.height)))
//    
//    let horizontalRatio = CGFloat(self.outputSize.width) / image.size.width
//    let verticalRatio = CGFloat(self.outputSize.height) / image.size.height
//    var ratio: CGFloat = 1
//    
////    print("horizontal ratio \(horizontalRatio)")
////    print("vertical ratio \(verticalRatio)")
////    print("ratio \(ratio)")
////    print("Image Width -  \(image.size.width). Image Height - \(image.size.height)")
//    
//    switch(contentMode) {
//    case .ScaleAspectFill:
//      ratio = max(horizontalRatio, verticalRatio)
//    case .ScaleAspectFit:
//      ratio = min(horizontalRatio, verticalRatio)
//    default:
//      ratio = min(horizontalRatio, verticalRatio)
//    }
//    
//    let newSize:CGSize = CGSizeMake(image.size.width * ratio, image.size.height * ratio)
//    
//    let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
//    let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
//
//    CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), image.CGImage)
//
//    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
//  }


  func stringFromTimeInterval(interval: NSTimeInterval) -> String {
    let ti = NSInteger(interval)
    let ms = Int((interval % 1) * 1000)
    let seconds = ti % 60
    let minutes = (ti / 60) % 60
    let hours = (ti / 3600)

    if hours > 0 {
      return NSString(format: "%0.2d:%0.2d:%0.2d.%0.2d", hours, minutes, seconds, ms) as String
    } else if minutes > 0 {
      return NSString(format: "%0.2d:%0.2d.%0.2d", minutes, seconds, ms) as String
    } else {
      return NSString(format: "%0.2d.%0.2d", seconds, ms) as String
    }
  }

}
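For context, I call the builder roughly like this (a sketch - `photos` is assumed to hold the captured UIImages):

// `photos`: [UIImage] captured earlier (assumption).
let builder = TimeLapseBuilder(photos: photos)
builder.build(outputSize: CGSizeMake(1920, 1080),
  progress: { progress in
    print("appended \(progress.completedUnitCount)/\(progress.totalUnitCount) frames")
  },
  success: { videoURL in
    print("video written to \(videoURL)")
  },
  failure: { error in
    print("build failed: \(error)")
  })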

This is a common problem with UIImage, because the orientation usually comes from the camera's EXIF data.

I wrote an extension on UIImage that lets me get an instance of the image with the correct orientation:

extension UIImage {
    var fixedOrientation: UIImage {
        if self.imageOrientation == .Up {
            return self
        }

        var transform = CGAffineTransformIdentity

        switch self.imageOrientation {
        case .Down, .DownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
            transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
        case .Left, .LeftMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0)
            transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
        case .Right, .RightMirrored:
            transform = CGAffineTransformTranslate(transform, 0, self.size.height)
            transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2))
        case .Up, .UpMirrored:
            break
        }

        switch self.imageOrientation {
        case .UpMirrored, .DownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0)
            transform = CGAffineTransformScale(transform, -1, 1)
        case .LeftMirrored, .RightMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.height, 0)
            transform = CGAffineTransformScale(transform, -1, 1)
        case .Up, .Down, .Left, .Right:
            break
        }

        let context = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height), CGImageGetBitsPerComponent(self.CGImage), 0, CGImageGetColorSpace(self.CGImage), CGImageGetBitmapInfo(self.CGImage).rawValue)
        CGContextConcatCTM(context, transform)

        switch self.imageOrientation {
        case .Left, .LeftMirrored, .Right, .RightMirrored:
            // Rotated images swap width and height in the destination rect.
            CGContextDrawImage(context, CGRectMake(0, 0, self.size.height, self.size.width), self.CGImage)
        default:
            CGContextDrawImage(context, CGRectMake(0, 0, self.size.width, self.size.height), self.CGImage)
        }

        let cgImage = CGBitmapContextCreateImage(context)
        let uiImage = UIImage.init(CGImage: cgImage!)

        return uiImage
    }
}
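A quick sketch of how this could feed the builder in your question - normalize every capture up front, before any CGImage is drawn (`capturedPhotos` is a hypothetical [UIImage] of camera shots):

let uprightPhotos = capturedPhotos.map { $0.fixedOrientation }   // hypothetical input array
let builder = TimeLapseBuilder(photos: uprightPhotos)
// Each element now reports .Up, so the CGImage pixels match what was captured.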

Similarly, if you know the CGSize you want for the image (you can usually just use the bounds of the current view), you can use this as an extension on UIImage as well:

extension UIImage {
    func resize(newSize: CGSize) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        self.drawInRect(CGRectMake(0, 0, newSize.width, newSize.height))
        let resized = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()  // balance the begin call so the context isn't leaked
        return resized
    }
}
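Combined with the orientation fix, each frame can be normalized and sized in one pass (again just a sketch - `capturedPhotos` and the 1920x1080 target are assumptions):

let frames = capturedPhotos.map { photo in
    photo.fixedOrientation.resize(CGSizeMake(1920, 1080))
}

Note that resize as written stretches to exactly newSize, so a landscape photo forced into a portrait-sized buffer will distort; to avoid that you would compute an aspect-fit rect first, along the lines of the commented-out fill method in the question.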

I'd also be curious to hear whether this can be refactored, but it has always worked for me - hope this helps!
