首页
学习
活动
专区
圈层
工具
发布
社区首页 >问答首页 >为什么在swift中合并两个视频或者更换背景音乐后,视频变成了黑屏?

为什么在swift中合并两个视频或者更换背景音乐后,视频变成了黑屏?
EN

Stack Overflow用户
提问于 2019-05-29 19:48:11
回答 1 · 查看 521 · 关注 0 · 票数 2

在我的ios应用程序中,我想合并两个视频并更改背景音乐。我试过了,对于普通的视频,它工作得很好。但是,当我选择任何延时视频,然后尝试合并或更改背景音乐时,视频变成了全黑屏幕。

对于我的应用,我使用swift 4.2和xcode-10。我也尝试了swift 4和swift 5,都返回了相同的结果。

下面是我的代码:

代码语言:swift
复制
class Export: NSObject {

    // MARK: - Errors

    /// Failure reasons surfaced through the completion handler.
    enum ExportError: Error {
        case noInputVideos               // `arrayVideos` was empty
        case silenceResourceMissing      // bundled "silence.mp3" not found
        case compositionTrackFailed      // could not add a mutable composition track
        case exporterInitFailed          // AVAssetExportSession init returned nil
    }

    let defaultSize = CGSize(width: 1920, height: 1920)

    /// Called exactly once: with the output URL on success, or an Error on failure.
    typealias Completion = (URL?, Error?) -> Void

    /// Merges the videos at `arrayVideos` back-to-back into one movie at `exportURL`.
    ///
    /// The output render size is the maximum width/height over all inputs. Clips with
    /// no audio track get a bundled silence track so the composition always has audio.
    ///
    /// - Parameters:
    ///   - arrayVideos: Source video file URLs, merged in array order.
    ///   - exportURL: Destination for the exported .mov file.
    ///   - completion: Invoked exactly once when the export finishes or fails.
    func mergeVideos(arrayVideos:[URL], exportURL: URL, completion:@escaping Completion) -> Void {

        guard !arrayVideos.isEmpty else {
            completion(nil, ExportError.noInputVideos)
            return
        }

        var insertTime = kCMTimeZero
        var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
        var outputSize = CGSize(width: 0, height: 0)

        // Determine video output size: the largest width and height across all inputs.
        for url in arrayVideos {

            let videoAsset = AVAsset(url: url)
            // Guard instead of [0]: an asset with no video track must not crash here.
            guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }

            // Apply the preferred transform so rotated (portrait) clips report their
            // display size; the transform can produce negative dimensions, hence fabs.
            var videoSize = videoTrack.naturalSize.applying(videoTrack.preferredTransform)
            videoSize.width = fabs(videoSize.width)
            videoSize.height = fabs(videoSize.height)

            outputSize.width = max(outputSize.width, videoSize.width)
            outputSize.height = max(outputSize.height, videoSize.height)
        }

        // Silence sound (used when a video has no audio track of its own).
        guard let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3") else {
            completion(nil, ExportError.silenceResourceMissing)
            return
        }
        let silenceAsset = AVAsset(url: silenceURL)
        let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first

        // Init composition
        let mixComposition = AVMutableComposition()

        for url in arrayVideos {

            let videoAsset = AVAsset(url: url)

            // Get video track
            guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else {
                print("video asset track not found")
                continue
            }

            // Use the clip's own audio track when it has one; otherwise fall back
            // to the bundled silence track so the audio timeline stays in sync.
            let audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first ?? silenceSoundTrack

            // Init video & audio composition track
            guard let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
                  let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
                completion(nil, ExportError.compositionTrackFailed)
                return
            }

            do {
                let startTime = kCMTimeZero
                let duration = videoAsset.duration

                // Add video track to video composition at the current insert time.
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, duration),
                                                          of: videoTrack,
                                                          at: insertTime)

                // Add audio track to audio composition at the same time.
                if let audioTrack = audioTrack {
                    try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, duration),
                                                              of: audioTrack,
                                                              at: insertTime)
                }

                // Layer instruction positions this clip inside the render area.
                let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack, asset: videoAsset, standardSize: outputSize, atTime: insertTime)

                // Hide this clip once it ends so the next clip shows through.
                let endTime = CMTimeAdd(insertTime, duration)
                layerInstruction.setOpacity(0, at: endTime)
                arrayLayerInstructions.append(layerInstruction)

                // Advance the insert time for the next clip.
                insertTime = endTime
            }
            catch {
                // Surface the underlying error instead of a bare, fixed message.
                print("Load track error: \(error)")
            }
        }

        // Main video composition instruction spanning the whole merged timeline.
        let mainInstruction = AVMutableVideoCompositionInstruction()
        mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
        mainInstruction.layerInstructions = arrayLayerInstructions

        // Main video composition
        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [mainInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = outputSize

        // Init exporter.
        // BUG FIX: the original did `errors = "..." as? Error`, which always yields
        // nil (String does not conform to Error), so failures reported (nil, nil).
        guard let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else {
            completion(nil, ExportError.exporterInitFailed)
            return
        }
        exporter.outputURL = exportURL
        exporter.outputFileType = AVFileType.mov
        exporter.shouldOptimizeForNetworkUse = true
        exporter.videoComposition = mainComposition

        // Do export.
        // BUG FIX: the original completion handler was empty, so callers were never
        // notified that the export finished (or why it failed).
        exporter.exportAsynchronously(completionHandler: {
            if exporter.status == .completed {
                completion(exportURL, nil)
            } else {
                completion(nil, exporter.error)
            }
        })

    }
}

// MARK:- Private methods
extension Export {

    /// Maps a video track's `preferredTransform` to a display orientation.
    ///
    /// Only the four exact 90°-multiple rotation matrices are recognized; any
    /// other transform (scaled, flipped, or slightly off) falls through to the
    /// default `.up` / landscape result.
    ///
    /// - Parameter transform: The track's `preferredTransform`.
    /// - Returns: The matching `UIImageOrientation` plus whether the clip
    ///   displays in portrait (i.e. it is stored rotated by ±90°).
    fileprivate func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
        var assetOrientation = UIImageOrientation.up
        var isPortrait = false
        // 90° rotation: portrait.
        if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
            assetOrientation = .right
            isPortrait = true
        // -90° rotation: portrait (other way up).
        } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
            assetOrientation = .left
            isPortrait = true
        // Identity: landscape, no rotation.
        } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
            assetOrientation = .up
        // 180° rotation: landscape, upside down.
        } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
            assetOrientation = .down
        }
        return (assetOrientation, isPortrait)
    }

    /// Builds a layer instruction that centers `track` inside `standardSize`.
    ///
    /// - Parameters:
    ///   - track: The composition track the instruction applies to.
    ///   - asset: The source asset, used to read the original video track's
    ///     natural size and preferred transform.
    ///   - standardSize: The render size of the final composition.
    ///   - atTime: The time at which the transform takes effect.
    /// - Returns: A layer instruction with a centering transform set at `atTime`.
    fileprivate func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize:CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {

        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
        let assetSize = assetTrack.naturalSize

        let transform = assetTrack.preferredTransform
        let assetInfo = orientationFromTransform(transform: transform)

        // NOTE(review): fixed at 1, so clips are centered but never scaled to
        // fill `standardSize` despite the name — confirm this is intended.
        let aspectFillRatio:CGFloat = 1

        if assetInfo.isPortrait {

            // Portrait clip: width/height are swapped relative to naturalSize,
            // so the centering offsets use height for X and width for Y.
            let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
            let posX = standardSize.width/2 - (assetSize.height * aspectFillRatio)/2
            let posY = standardSize.height/2 - (assetSize.width * aspectFillRatio)/2
            let moveFactor = CGAffineTransform(translationX: posX, y: posY)

            instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor), at: atTime)

        } else {
            // Landscape clip: center using natural width/height directly.
            let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
            let posX = standardSize.width/2 - (assetSize.width * aspectFillRatio)/2
            let posY = standardSize.height/2 - (assetSize.height * aspectFillRatio)/2
            let moveFactor = CGAffineTransform(translationX: posX, y: posY)
            var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)

            if assetInfo.orientation == .down {
                // NOTE(review): this replaces preferredTransform with a pure π
                // rotation, dropping the transform's translation components
                // (tx, ty); that can shift upside-down clips outside the render
                // rect and produce black frames — verify against such a clip.
                let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
                concat = fixUpsideDown.concatenating(scaleFactor).concatenating(moveFactor)
            }

            instruction.setTransform(concat, at: atTime)
        }
        return instruction
    }
}

我预计延时视频会像普通视频一样工作,而且不会出现黑屏

EN

回答 1

Stack Overflow用户

发布于 2021-09-14 08:40:25

//主视频构图说明

将此代码替换为以下代码

代码语言:swift
复制
    // Answer fragment: replacement for the question's "Main video composition
    // instruction" section. Uses the Swift 5 CMTime spelling (CMTime.zero,
    // CMTimeRangeMake(start:duration:)), unlike the Swift 4.2 question code.
    // NOTE(review): `mutableComposition` and `videoComposition` are defined by
    // surrounding code not shown here; this fragment does not compile alone.
    let instruction = AVMutableVideoCompositionInstruction()
    // The instruction must cover the full composition duration.
    instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: mutableComposition.duration)

    let videotrack = mutableComposition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    
    // Set a white background behind the video frames (default is black).
    let rgb = CGColorSpaceCreateDeviceRGB()
    let myColor : [CGFloat] = [1.0, 1.0, 1.0, 1.0] // white, RGBA
    let ref = CGColor(colorSpace: rgb, components: myColor)
    instruction.backgroundColor = ref

    
    instruction.layerInstructions = NSArray(object: layerinstruction) as [AnyObject] as! [AVVideoCompositionLayerInstruction]
    videoComposition.instructions = [instruction]
票数 2
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/56359931

复制
相关文章

相似问题

领券
问题归档 · 专栏文章 · 快讯文章归档 · 关键词归档 · 开发者手册归档 · 开发者手册 Section 归档