I am work with image overlay for watermark effect in video using swift.I am using AVFoundation for this but somehow I am not succeed.
Following is my code for overlay image/text
let path = NSBundle.mainBundle().pathForResource("sample_movie", ofType:"mp4")
let fileURL = NSURL(fileURLWithPath: path!)
let composition = AVMutableComposition()
var vidAsset = AVURLAsset(URL: fileURL, options: nil)
// get video track
let vtrack = vidAsset.tracksWithMediaType(AVMediaTypeVideo)
let videoTrack:AVAssetTrack = vtrack[0] as! AVAssetTrack
let vid_duration = videoTrack.timeRange.duration
let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
var error: NSError?
let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
compositionvideoTrack.insertTimeRange(vid_timerange, ofTrack: videoTrack, atTime: kCMTimeZero, error: &error)
compositionvideoTrack.preferredTransform = videoTrack.preferredTransform
// Watermark Effect
let size = videoTrack.naturalSize
let imglogo = UIImage(named: "image.png")
let imglayer = CALayer()
imglayer.contents = imglogo?.CGImage
imglayer.frame = CGRectMake(5, 5, 100, 100)
imglayer.opacity = 0.6
// create text Layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.whiteColor().CGColor
titleLayer.string = "Dummy text"
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.frame = CGRectMake(0, 50, size.width, size.height / 6)
let videolayer = CALayer()
videolayer.frame = CGRectMake(0, 0, size.width, size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRectMake(0, 0, size.width, size.height)
parentlayer.addSublayer(videolayer)
parentlayer.addSublayer(imglayer)
parentlayer.addSublayer(titleLayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, inLayer: parentlayer)
// instruction for watermark
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = NSArray(object: layerinstruction) as [AnyObject]
layercomposition.instructions = NSArray(object: instruction) as [AnyObject]
// create new file to receive data
let dirPaths = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)
let docsDir: AnyObject = dirPaths[0]
let movieFilePath = docsDir.stringByAppendingPathComponent("result.mov")
let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality)
assetExport.outputFileType = AVFileTypeQuickTimeMovie
assetExport.outputURL = movieDestinationUrl
assetExport.exportAsynchronouslyWithCompletionHandler({
switch assetExport.status{
case AVAssetExportSessionStatus.Failed:
println("failed \(assetExport.error)")
case AVAssetExportSessionStatus.Cancelled:
println("cancelled \(assetExport.error)")
default:
println("Movie complete")
// play video
NSOperationQueue.mainQueue().addOperationWithBlock({ () -> Void in
self.playVideo(movieDestinationUrl!)
})
}
})
By this code, I am not achieve overlay....I don't know what I am doing wrong...
Questions:
- Is there any missing thing in this code? Or any problem with this code?
- Is this code only worked with recorded video or all videos including videos from gallery?
The code provided by @El Captain would work. It's only missing:
assetExport.videoComposition = layercomposition
You can add this right after the instantiation of the AVAssetExportSession
NOTE: The code originally provided would only export the video track but not the audio track. If you need the audio track you could add something like this after you configure the compositionvideoTrack:
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
for audioTrack in audioTracks {
try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, ofTrack: audioTrack, atTime: kCMTimeZero)
}
For me (what I see in your code), your are not adding the parentlayer to the screen.
You create a CALayer() to add videolayer, imglayer and titleLayer into a new layer but you don't add this last one on the screen.
yourView.layer.addSublayer(parentlayer)
Hope this help you
@Rey Hernandez this just helped me a lot! If anyone wants further clarification on how to add an audio asset with to the video here is the code to combine them
let vtrack = vidAsset.tracksWithMediaType(AVMediaTypeVideo)
let videoTrack:AVAssetTrack = vtrack[0]
let vid_duration = videoTrack.timeRange.duration
let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
let atrack = vidAsset.tracksWithMediaType(AVMediaTypeAudio)
let audioTrack:AVAssetTrack = atrack[0]
let audio_duration = audioTrack.timeRange.duration
let audio_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
do {
let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
try compositionvideoTrack.insertTimeRange(vid_timerange, ofTrack: videoTrack, atTime: kCMTimeZero)
compositionvideoTrack.preferredTransform = videoTrack.preferredTransform
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
try! compositionAudioTrack.insertTimeRange(audio_timerange, ofTrack: audioTrack, atTime: kCMTimeZero)
compositionvideoTrack.preferredTransform = audioTrack.preferredTransform
} catch {
print(error)
}
To supplement, here's a function that creates CATextLayers based on an array UITextViews supplied by copying over their rotation, scale and font. Just add these to your container-layer supplied to AVVideoCompositionCoreAnimationTool:
private static func createTextLayer(totalSize: CGSize,
textView: UITextView) -> CATextLayer {
let textLayer: CACenteredTextLayer = CACenteredTextLayer()
textLayer.backgroundColor = UIColor.clear
textLayer.foregroundColor = textView.textColor?.cgColor
textLayer.masksToBounds = false
textLayer.isWrapped = true
let scale: CGFloat = UIScreen.main.scale
if let font: UIFont = textView.font {
let upscaledFont: UIFont = font.withSize(font.pointSize * scale)
let attributedString = NSAttributedString(
string: textView.text,
attributes: [NSAttributedString.Key.font: upscaledFont,
NSAttributedString.Key.foregroundColor: textView.textColor ?? UIColor.white])
textLayer.string = attributedString
}
// Set text alignment
let alignment: CATextLayerAlignmentMode
switch textView.textAlignment {
case NSTextAlignment.left:
alignment = CATextLayerAlignmentMode.left
case NSTextAlignment.center:
alignment = CATextLayerAlignmentMode.center
default:
alignment = CATextLayerAlignmentMode.right
}
textLayer.alignmentMode = alignment
let originalFrame: CGRect = textView.frame
// Also take scale into consideration
let targetSize: CGSize = CGSize(width: originalFrame.width * scale,
height: originalFrame.height * scale)
// The CALayer positioning is inverted on the Y-axes, so apply this
let origin: CGPoint = CGPoint(x: originalFrame.origin.x * scale,
y: (totalSize.height - (originalFrame.origin.y * scale)) - targetSize.height)
textLayer.frame = CGRect(x: origin.x,
y: origin.y,
width: targetSize.width,
height: targetSize.height)
// Determine the scale
textLayer.anchorPoint = CGPoint(x: 0.5,
y: 0.5)
var newTransform: CATransform3D = CATransform3DMakeScale(textView.transform.xScale,
textView.transform.yScale,
0)
// Convert to degrees, invert the amount and convert back to radians to apply
newTransform = CATransform3DRotate(newTransform,
textView.transform.radiansFor3DTransform,
0,
0,
1)
textLayer.transform = newTransform
return textLayer
}
Combine this with this subclassing of CATextLayer to center the text vertically:
final class CACenteredTextLayer: CATextLayer {
override func draw(in ctx: CGContext) {
guard let attributedString = string as? NSAttributedString else { return }
let height = self.bounds.size.height
let boundingRect: CGRect = attributedString.boundingRect(
with: CGSize(width: bounds.width,
height: CGFloat.greatestFiniteMagnitude),
options: NSStringDrawingOptions.usesLineFragmentOrigin,
context: nil)
let yDiff: CGFloat = (height - boundingRect.size.height) / 2
ctx.saveGState()
ctx.translateBy(x: 0.0, y: yDiff)
super.draw(in: ctx)
ctx.restoreGState()
}
}
private extension CGAffineTransform {
var xScale: CGFloat {
return sqrt((a*a) + (c*c))
}
var yScale: CGFloat {
return sqrt((b*b) + (d*d))
}
var radiansFor3DTransform: CGFloat {
let radians: CGFloat = atan2(b, a);
let degrees: CGFloat = -(radians * 180 / CGFloat.pi)
let convertedRadians: CGFloat = CGFloat(degrees * (CGFloat.pi / 180))
return convertedRadians
}
}
来源:https://stackoverflow.com/questions/32450356/image-text-overlay-in-video-swift