Rendering a SceneKit scene to video output


Question


As a primarily high-level/iOS dev, I'm interested in using SceneKit for animation projects.

I've been having fun with SceneKit for some months now. Although it's obviously designed for 'live' interaction, I'd find it incredibly useful to be able to 'render' an SCNScene to video. Currently I use QuickTime's screen recorder to capture video output, but (of course) the frame rate drops in doing so. Is there an alternative that allows a scene to be rendered at its own pace and output as a smooth video file?

I understand this is unlikely to be possible... Just thought I'd ask in case I was missing something lower-level!


Answer 1:


You could use an SCNRenderer to render to a CGImage offscreen, then add the CGImage to a video stream using AVFoundation.
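
If you don't already have a renderer, you can create one offscreen, detached from any view. A minimal sketch (iOS-flavoured, untested; scene stands in for the SCNScene you want to capture):

let glContext = EAGLContext(API: .OpenGLES2)
let renderer = SCNRenderer(context: glContext, options: nil)
renderer.scene = scene // the SCNScene you want to capture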

I wrote this Swift extension for rendering into a CGImage.

import SceneKit
import Accelerate
#if os(iOS)
import OpenGLES
#elseif os(OSX)
import OpenGL // depending on your target you may need import OpenGL.GL3 for the framebuffer calls
#endif

public extension SCNRenderer {

    public func renderToImageSize(size: CGSize, floatComponents: Bool, atTime time: NSTimeInterval) -> CGImage? {

        var thumbnailCGImage: CGImage?

        let width = GLsizei(size.width), height = GLsizei(size.height)
        let samplesPerPixel = 4

        #if os(iOS)
            let oldGLContext = EAGLContext.currentContext()
            let glContext = unsafeBitCast(context, EAGLContext.self)

            EAGLContext.setCurrentContext(glContext)
            objc_sync_enter(glContext)
        #elseif os(OSX)
            let oldGLContext = CGLGetCurrentContext()
            let glContext = unsafeBitCast(context, CGLContextObj.self)

            CGLSetCurrentContext(glContext)
            CGLLockContext(glContext)
        #endif

        // set up the OpenGL buffers
        var thumbnailFramebuffer: GLuint = 0
        glGenFramebuffers(1, &thumbnailFramebuffer)
        glBindFramebuffer(GLenum(GL_FRAMEBUFFER), thumbnailFramebuffer); checkGLErrors()

        var colorRenderbuffer: GLuint = 0
        glGenRenderbuffers(1, &colorRenderbuffer)
        glBindRenderbuffer(GLenum(GL_RENDERBUFFER), colorRenderbuffer)
        if floatComponents {
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA16F), width, height)
        } else {
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA8), width, height)
        }
        glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0), GLenum(GL_RENDERBUFFER), colorRenderbuffer); checkGLErrors()

        var depthRenderbuffer: GLuint = 0
        glGenRenderbuffers(1, &depthRenderbuffer)
        glBindRenderbuffer(GLenum(GL_RENDERBUFFER), depthRenderbuffer)
        glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_DEPTH_COMPONENT24), width, height)
        glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_DEPTH_ATTACHMENT), GLenum(GL_RENDERBUFFER), depthRenderbuffer); checkGLErrors()

        let framebufferStatus = Int32(glCheckFramebufferStatus(GLenum(GL_FRAMEBUFFER)))
        assert(framebufferStatus == GL_FRAMEBUFFER_COMPLETE)
        if framebufferStatus != GL_FRAMEBUFFER_COMPLETE {
            return nil
        }

        // clear buffer
        glViewport(0, 0, width, height)
        glClear(GLbitfield(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)); checkGLErrors()

        // render
        renderAtTime(time); checkGLErrors()

        // create the image
        if floatComponents { // float components (16-bits of actual precision)

            // slurp bytes out of OpenGL
            typealias ComponentType = Float

            var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel, repeatedValue: 0) // element count, not byte count
            glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_FLOAT), &imageRawBuffer)

            // flip image vertically — OpenGL has a different 'up' than CoreGraphics
            let rowLength = Int(width) * samplesPerPixel
            for rowIndex in 0..<(Int(height) / 2) {
                let baseIndex = rowIndex * rowLength
                let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
            }

            // make the CGImage
            var imageBuffer = vImage_Buffer(
                data: UnsafeMutablePointer<Float>(imageRawBuffer),
                height: vImagePixelCount(height),
                width: vImagePixelCount(width),
                rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

            var format = vImage_CGImageFormat(
                bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                colorSpace: nil, // defaults to sRGB
                bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue | CGBitmapInfo.FloatComponents.rawValue),
                version: UInt32(0),
                decode: nil,
                renderingIntent: kCGRenderingIntentDefault)

            var error: vImage_Error = 0
            thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()

        } else { // byte components

            // slurp bytes out of OpenGL
            typealias ComponentType = UInt8

            var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel, repeatedValue: 0) // element count, not byte count
            glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), &imageRawBuffer)

            // flip image vertically — OpenGL has a different 'up' than CoreGraphics
            let rowLength = Int(width) * samplesPerPixel
            for rowIndex in 0..<(Int(height) / 2) {
                let baseIndex = rowIndex * rowLength
                let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
            }

            // make the CGImage
            var imageBuffer = vImage_Buffer(
                data: UnsafeMutablePointer<ComponentType>(imageRawBuffer), // ComponentType (UInt8) here, not Float
                height: vImagePixelCount(height),
                width: vImagePixelCount(width),
                rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

            var format = vImage_CGImageFormat(
                bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                colorSpace: nil, // defaults to sRGB
                bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue),
                version: UInt32(0),
                decode: nil,
                renderingIntent: kCGRenderingIntentDefault)

            var error: vImage_Error = 0
            thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()
        }

        #if os(iOS)
            objc_sync_exit(glContext)
            if oldGLContext != nil {
                EAGLContext.setCurrentContext(oldGLContext)
            }
        #elseif os(OSX)
            CGLUnlockContext(glContext)
            if oldGLContext != nil {
                CGLSetCurrentContext(oldGLContext)
            }
        #endif

        return thumbnailCGImage
    }
}


func checkGLErrors() {
    var glError: GLenum
    var hadError = false
    do {
        glError = glGetError()
        if glError != 0 {
            println(String(format: "OpenGL error %#x", glError))
            hadError = true
        }
    } while glError != 0
    assert(!hadError)
}
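
To feed those CGImages into a movie file, something along these lines should work (an untested sketch: writeSceneToVideo and pixelBufferFromCGImage are just names I'm using here, and it assumes a fixed 30 fps plus the renderToImageSize extension above):

import AVFoundation

func writeSceneToVideo(renderer: SCNRenderer, outputURL: NSURL, size: CGSize, duration: NSTimeInterval) throws {
    let fps: Int32 = 30
    let writer = try AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie)
    let settings: [String: AnyObject] = [
        AVVideoCodecKey: AVVideoCodecH264,
        AVVideoWidthKey: Int(size.width),
        AVVideoHeightKey: Int(size.height)]
    let input = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: settings)
    let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input, sourcePixelBufferAttributes: nil)
    writer.addInput(input)
    writer.startWriting()
    writer.startSessionAtSourceTime(kCMTimeZero)

    for frame in 0..<Int(duration * Double(fps)) {
        let time = NSTimeInterval(frame) / Double(fps)
        guard let image = renderer.renderToImageSize(size, floatComponents: false, atTime: time),
            buffer = pixelBufferFromCGImage(image, size: size) else { continue }
        while !input.readyForMoreMediaData {} // crude; prefer requestMediaDataWhenReadyOnQueue in real code
        adaptor.appendPixelBuffer(buffer, withPresentationTime: CMTimeMake(Int64(frame), fps))
    }
    input.markAsFinished()
    writer.finishWritingWithCompletionHandler {}
}

// Draw a CGImage into a fresh CVPixelBuffer so the adaptor can consume it
func pixelBufferFromCGImage(image: CGImage, size: CGSize) -> CVPixelBuffer? {
    var pxbuffer: CVPixelBuffer?
    CVPixelBufferCreate(kCFAllocatorDefault, Int(size.width), Int(size.height),
        kCVPixelFormatType_32ARGB, nil, &pxbuffer)
    guard let buffer = pxbuffer else { return nil }
    CVPixelBufferLockBaseAddress(buffer, 0)
    let context = CGBitmapContextCreate(CVPixelBufferGetBaseAddress(buffer),
        Int(size.width), Int(size.height), 8,
        CVPixelBufferGetBytesPerRow(buffer),
        CGColorSpaceCreateDeviceRGB(),
        CGImageAlphaInfo.NoneSkipFirst.rawValue)
    CGContextDrawImage(context, CGRect(x: 0, y: 0, width: size.width, height: size.height), image)
    CVPixelBufferUnlockBaseAddress(buffer, 0)
    return buffer
}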



Answer 2:


This is the answer for SceneKit rendering with Metal.

Warning: swizzling nextDrawable like this may not be a proper method for the App Store, but it works.

Step 1: Swap CAMetalLayer's nextDrawable method with your own using method swizzling, and save the CAMetalDrawable on each render loop.

extension CAMetalLayer {
  public static func setupSwizzling() {
    struct Static {
      static var token: dispatch_once_t = 0
    }

    dispatch_once(&Static.token) {
      let copiedOriginalSelector = #selector(CAMetalLayer.originalNextDrawable)
      let originalSelector = #selector(CAMetalLayer.nextDrawable)
      let swizzledSelector = #selector(CAMetalLayer.newNextDrawable)

      let copiedOriginalMethod = class_getInstanceMethod(self, copiedOriginalSelector)
      let originalMethod = class_getInstanceMethod(self, originalSelector)
      let swizzledMethod = class_getInstanceMethod(self, swizzledSelector)

      let oldImp = method_getImplementation(originalMethod)
      method_setImplementation(copiedOriginalMethod, oldImp)
      method_exchangeImplementations(originalMethod, swizzledMethod)
    }
  }


  func newNextDrawable() -> CAMetalDrawable? {
    let drawable = originalNextDrawable()
    // Save the drawable to any where you want
    AppManager.sharedInstance.currentSceneDrawable = drawable
    return drawable
  }

  func originalNextDrawable() -> CAMetalDrawable? {
    // Placeholder only. After swizzling, this selector carries the original nextDrawable implementation.
    return nil
  }
}

Step 2: Set up the swizzling in the AppDelegate's application(_:didFinishLaunchingWithOptions:)

func application(application: UIApplication, didFinishLaunchingWithOptions launchOptions: [NSObject: AnyObject]?) -> Bool {
  CAMetalLayer.setupSwizzling()
  return true
}

Step 3: Disable framebufferOnly on your SCNView's CAMetalLayer, so that getBytes can be called on its MTLTexture. (Note that this disables some drawable optimizations, so it can cost performance.)

if let metalLayer = scnView.layer as? CAMetalLayer {
  metalLayer.framebufferOnly = false
}

Step 4: In your SCNView's delegate (SCNSceneRendererDelegate), grab the saved drawable's texture and convert it to an image

func renderer(renderer: SCNSceneRenderer, didRenderScene scene: SCNScene, atTime time: NSTimeInterval) {
    if let texture = AppManager.sharedInstance.currentSceneDrawable?.texture where !texture.framebufferOnly {
      AppManager.sharedInstance.currentSceneDrawable = nil
      // Get image from texture
      let image = texture.toImage()
      // Use the image for video recording
    }
}

import Metal
import UIKit

extension MTLTexture {
  func bytes() -> UnsafeMutablePointer<Void> {
    let width = self.width
    let height   = self.height
    let rowBytes = self.width * 4
    let p = malloc(width * height * 4) // caller must free this; toImage() below hands ownership to a CGDataProvider that frees it
    self.getBytes(p, bytesPerRow: rowBytes, fromRegion: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
    return p
  }

  func toImage() -> UIImage? {
    var uiImage: UIImage?
    let p = bytes()
    let pColorSpace = CGColorSpaceCreateDeviceRGB()
    let rawBitmapInfo = CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue
    let bitmapInfo:CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)

    let textureSize = self.width * self.height * 4
    let rowBytes = self.width * 4
    // Free the malloc'd buffer when the provider is released; this fixes the leak flagged in bytes()
    let provider = CGDataProviderCreateWithData(nil, p, textureSize, { _, data, _ in free(UnsafeMutablePointer<Void>(data)) })!

    if let cgImage = CGImageCreate(self.width, self.height, 8, 32, rowBytes, pColorSpace, bitmapInfo, provider, nil, true, CGColorRenderingIntent.RenderingIntentDefault) {
      uiImage = UIImage(CGImage: cgImage)
    }
    return uiImage
  }

  func toImageAsJpeg(compressionQuality: CGFloat) -> UIImage? {
    // Round-trip through JPEG data to get a compressed image
    guard let image = toImage(), data = UIImageJPEGRepresentation(image, compressionQuality) else { return nil }
    return UIImage(data: data)
  }
}
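
If you're writing frames with an AVAssetWriter, you can also skip the UIImage detour and copy the texture straight into a CVPixelBuffer. A sketch, assuming adaptor is an AVAssetWriterInputPixelBufferAdaptor you've already configured with kCVPixelFormatType_32BGRA (matching the drawable's pixel format):

func appendTexture(texture: MTLTexture, adaptor: AVAssetWriterInputPixelBufferAdaptor, time: CMTime) {
  guard let pool = adaptor.pixelBufferPool else { return }
  var pixelBufferOut: CVPixelBuffer?
  CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pixelBufferOut)
  guard let pixelBuffer = pixelBufferOut else { return }

  CVPixelBufferLockBaseAddress(pixelBuffer, 0)
  // CAMetalLayer drawables are BGRA8, which lines up with kCVPixelFormatType_32BGRA
  texture.getBytes(CVPixelBufferGetBaseAddress(pixelBuffer),
    bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
    fromRegion: MTLRegionMake2D(0, 0, texture.width, texture.height),
    mipmapLevel: 0)
  CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)

  adaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: time)
}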

Step 5 (Optional): You may need to confirm that the drawable you capture comes from the CAMetalLayer you're targeting (if more than one CAMetalLayer is active at the same time).
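
For example, in newNextDrawable you could check the layer identity before saving the drawable (targetMetalLayer is a hypothetical property you'd set to scnView.layer during setup):

func newNextDrawable() -> CAMetalDrawable? {
  let drawable = originalNextDrawable()
  // Only capture drawables produced by our own SCNView's layer
  if self === AppManager.sharedInstance.targetMetalLayer {
    AppManager.sharedInstance.currentSceneDrawable = drawable
  }
  return drawable
}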




Answer 3:


It would actually be pretty easy! Here's pseudocode for how I'd do it (on the SCNView):

int numberOfFrames = 300;
int currentFrame = 0;
int framesPerSecond = 30;

-(void) renderAFrame{
    [self renderAtTime:(double)currentFrame / framesPerSecond]; // advance the scene time each frame (1/framesPerSecond would be integer division, i.e. 0)

    NSImage *frame = [self snapshot];

    // save the image with the frame number in the name such as f_001.png

    currentFrame++;

    if(currentFrame < numberOfFrames){
        [self renderAFrame];
    }

}

This will give you a sequence of images, rendered at 30 frames per second, that you can import into any editing software and convert to video.
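
To stitch the frames into a movie, ffmpeg is one option (assuming the f_001.png-style naming above):

ffmpeg -framerate 30 -i f_%03d.png -c:v libx264 -pix_fmt yuv420p output.mp4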




Answer 4:


You can do it with an SKVideoNode that you put into an SKScene, which you then map onto an SCNNode's geometry as the material's diffuse contents (hope that's clear ;) )

        player                                                              = AVPlayer(URL: fileURL!)
        let videoSpriteKitNodeLeft                                          = SKVideoNode(AVPlayer: player)
        let videoNodeLeft                                                   = SCNNode()
        let spriteKitScene1                                                 = SKScene(size: CGSize(width: 1280 * screenScale, height: 1280 * screenScale))
        spriteKitScene1.shouldRasterize                                     = true

        videoNodeLeft.geometry                                              = SCNSphere(radius: 30)
        spriteKitScene1.scaleMode                                           = .AspectFit
        videoSpriteKitNodeLeft.position                                     = CGPoint(x: spriteKitScene1.size.width / 2.0, y: spriteKitScene1.size.height / 2.0)
        videoSpriteKitNodeLeft.size                                         = spriteKitScene1.size

        spriteKitScene1.addChild(videoSpriteKitNodeLeft)

        videoNodeLeft.geometry?.firstMaterial?.diffuse.contents             = spriteKitScene1
        videoNodeLeft.geometry?.firstMaterial?.doubleSided                  = true

        // Flip video upside down, so that it's shown in the right position
        var transform                                                       = SCNMatrix4MakeRotation(Float(M_PI), 0.0, 0.0, 1.0)
        transform                                                           = SCNMatrix4Translate(transform, 1.0, 1.0, 0.0)

        videoNodeLeft.pivot                                                 = SCNMatrix4MakeRotation(Float(M_PI_2), 0.0, -1.0, 0.0)
        videoNodeLeft.geometry?.firstMaterial?.diffuse.contentsTransform    = transform

        videoNodeLeft.position                                              = SCNVector3(x: 0, y: 0, z: 0)

        scene.rootNode.addChildNode(videoNodeLeft)

I've extracted this code from a GitHub project of mine, a 360° video player that uses SceneKit to play a video inside a 3D sphere: https://github.com/Aralekk/simple360player_iOS/blob/master/simple360player/ViewController.swift

I hope this helps !

Arthur



Source: https://stackoverflow.com/questions/29060465/rendering-a-scenekit-scene-to-video-output
