我想录制一系列剪辑,当通过视频播放器或ffmpeg -f concat一起播放时,可以无缝地回放。
在这两种情况下,我在每个段的连接点都有一个非常明显的音频问题。
我目前的策略是维护2个AssetWriter实例。在每个分界点,我启动一个新的写入器,等待它准备好,然后开始给它提供样本。当视频和音频样本在特定时间点完成时,我会关闭上一个写入器。
如何修改它以获得连续的剪辑录制?根本原因是什么?
import Foundation
import UIKit
import AVFoundation
// Records capture output into back-to-back MP4 segments by rotating between
// AVAssetWriter instances, aiming for gap-free playback when the segments are
// concatenated.
class StreamController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var previewView: UIView!
// Three writer "slots" implement the segment hand-off:
//   closing* — the writer being drained of samples up to the cut point,
//   current* — the writer actively receiving new samples,
//   next*    — a pre-created writer waiting to become current at the next cut.
var closingVideoInput: AVAssetWriterInput?
var closingAudioInput: AVAssetWriterInput?
var closingAssetWriter: AVAssetWriter?
var currentVideoInput: AVAssetWriterInput?
var currentAudioInput: AVAssetWriterInput?
var currentAssetWriter: AVAssetWriter?
var nextVideoInput: AVAssetWriterInput?
var nextAudioInput: AVAssetWriterInput?
var nextAssetWriter: AVAssetWriter?
var previewLayer: AVCaptureVideoPreviewLayer?
var videoHelper: VideoHelper?
// Wall-clock start of the recording session; used only for log timestamps.
var startTime: NSTimeInterval = 0
// Starts the capture session and schedules a writer rotation every 5 seconds.
override func viewDidLoad() {
super.viewDidLoad()
startTime = NSDate().timeIntervalSince1970
// Pre-create the first segment writer before any samples arrive.
createSegmentWriter()
videoHelper = VideoHelper()
videoHelper!.delegate = self
videoHelper!.startSession()
// NOTE(review): this repeating timer retains `self` and is never invalidated —
// TODO confirm the controller lives for the whole app session, otherwise store
// and invalidate the timer.
NSTimer.scheduledTimerWithTimeInterval(5, target: self, selector: "createSegmentWriter", userInfo: nil, repeats: true)
}
// Builds the "next" AVAssetWriter (H.264 video + AAC audio into an MP4 file)
// and starts it writing, so captureOutput can promote it to "current" as soon
// as its status leaves .Unknown.
func createSegmentWriter() {
    let elapsed = NSDate().timeIntervalSince1970 - self.startTime
    print("Creating segment writer at t=\(elapsed)")

    let writer = try! AVAssetWriter(URL: NSURL(fileURLWithPath: OutputFileNameHelper.instance.pathForOutput()), fileType: AVFileTypeMPEG4)
    nextAssetWriter = writer
    writer.shouldOptimizeForNetworkUse = true

    // 960x540 H.264 video track.
    let videoSettings: [String: AnyObject] = [
        AVVideoCodecKey: AVVideoCodecH264,
        AVVideoWidthKey: 960,
        AVVideoHeightKey: 540,
    ]
    let videoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
    nextVideoInput = videoInput
    videoInput.expectsMediaDataInRealTime = true
    writer.addInput(videoInput)

    // Stereo 44.1 kHz AAC audio track.
    let audioSettings: [String: AnyObject] = [
        AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC),
        AVSampleRateKey: 44100.0,
        AVNumberOfChannelsKey: 2,
    ]
    let audioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
    nextAudioInput = audioInput
    audioInput.expectsMediaDataInRealTime = true
    writer.addInput(audioInput)

    writer.startWriting()
}
// Attaches the camera preview layer once the view has its final bounds.
override func viewDidAppear(animated: Bool) {
    super.viewDidAppear(animated)
    previewLayer = AVCaptureVideoPreviewLayer(session: videoHelper!.captureSession)
    previewLayer!.frame = self.previewView.bounds
    previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
    // BUG FIX: the original tested `supportsVideoOrientation != nil`, which is
    // true whenever the connection exists — even when the capability is false.
    // Compare the Bool value itself.
    if previewLayer?.connection?.supportsVideoOrientation == true {
        previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
    }
    self.previewView.layer.addSublayer(previewLayer!)
}
// Finalizes the closing writer, but only after BOTH tracks have delivered
// their last pre-cut sample; then clears the closing slot and resets the flags.
func closeWriter() {
    guard videoFinished && audioFinished else { return }

    let finishedFile = closingAssetWriter?.outputURL.pathComponents?.last
    closingAssetWriter?.finishWritingWithCompletionHandler() {
        let delta = NSDate().timeIntervalSince1970 - self.startTime
        print("segment \(finishedFile) finished at t=\(delta)")
    }

    self.closingAudioInput = nil
    self.closingVideoInput = nil
    self.closingAssetWriter = nil
    audioFinished = false
    videoFinished = false
}
// Marks the video side of the closing writer as drained and tries to finalize.
func closingVideoFinished() {
    guard closingVideoInput != nil else { return }
    videoFinished = true
    closeWriter()
}
// Marks the audio side of the closing writer as drained and tries to finalize.
func closingAudioFinished() {
    guard closingAudioInput != nil else { return }
    audioFinished = true
    closeWriter()
}
// Presentation timestamp of the current segment boundary: samples earlier than
// this still belong to the closing writer.
var closingTime: CMTime = kCMTimeZero
// Set once the audio / video track (respectively) has delivered its last
// pre-cut sample; both must be true before the closing writer is finalized.
var audioFinished = false
var videoFinished = false
// Delegate callback for both audio and video sample buffers. Promotes the
// pre-created "next" writer at a segment boundary, then routes each buffer to
// the closing or current writer based on its presentation timestamp.
//
// NOTE(review): even with correct routing, AAC audio buffers span fixed frame
// counts that rarely align with the video cut time, so sample buffers may need
// to be split at `closingTime` (e.g. CMSampleBufferCopySampleBufferForRange)
// for truly seamless concatenation — TODO confirm against the answer below.
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBufferRef, fromConnection connection: AVCaptureConnection!) {
    let sampleTime: CMTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)

    // Rotate writers once the pre-created writer has left .Unknown (i.e.
    // startWriting has taken effect). The original compared rawValue != 0,
    // which is the same condition stated as a magic number.
    if let nextWriter = nextAssetWriter {
        if nextWriter.status != .Unknown {
            print("Switching asset writers at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
            closingAssetWriter = currentAssetWriter
            closingVideoInput = currentVideoInput
            closingAudioInput = currentAudioInput
            currentAssetWriter = nextAssetWriter
            currentVideoInput = nextVideoInput
            currentAudioInput = nextAudioInput
            nextAssetWriter = nil
            nextVideoInput = nil
            nextAudioInput = nil
            closingTime = sampleTime
            // The new segment's timeline starts exactly at this buffer's PTS.
            currentAssetWriter!.startSessionAtSourceTime(sampleTime)
        }
    }

    if currentAssetWriter != nil {
        if let _ = captureOutput as? AVCaptureVideoDataOutput {
            if (CMTimeCompare(sampleTime, closingTime) < 0) {
                // Pre-cut video belongs to the closing writer.
                if closingVideoInput?.readyForMoreMediaData == true {
                    closingVideoInput?.appendSampleBuffer(sampleBuffer)
                }
            } else {
                closingVideoFinished()
                if currentVideoInput?.readyForMoreMediaData == true {
                    currentVideoInput?.appendSampleBuffer(sampleBuffer)
                }
            }
        } else if let _ = captureOutput as? AVCaptureAudioDataOutput {
            if (CMTimeCompare(sampleTime, closingTime) < 0) {
                // BUG FIX: pre-cut audio must go to the *closing* writer's
                // input, mirroring the video branch above. The original
                // appended it to currentAudioInput, dropping it from the old
                // segment and pushing pre-session audio at the new writer —
                // an audible glitch at every segment join.
                if closingAudioInput?.readyForMoreMediaData == true {
                    closingAudioInput?.appendSampleBuffer(sampleBuffer)
                }
            } else {
                closingAudioFinished()
                if currentAudioInput?.readyForMoreMediaData == true {
                    currentAudioInput?.appendSampleBuffer(sampleBuffer)
                }
            }
        }
    }
}
// Allow autorotation; the orientation mask below still pins the UI to
// landscape-right.
override func shouldAutorotate() -> Bool {
return true
}
// Landscape-right only, matching the preview layer's fixed video orientation.
override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
    return .LandscapeRight
}
}

发布于 2015-11-21 06:48:17
我认为根本原因是由于视频和音频CMSampleBuffer代表不同的时间间隔。您需要拆分和加入音频CMSampleBuffer,以使它们无缝地插入到AVAssetWriter的时间线中,该时间线可能应该基于视频演示时间戳。
为什么必须改变音频而不是视频?它看起来不对称,但我猜这是因为音频有更高的采样率。
附注:实际上,手动创建新的拆分样本缓冲区相当繁琐:CMSampleBufferCreate有一大堆参数。CMSampleBufferCopySampleBufferForRange可能更容易、更高效。
https://stackoverflow.com/questions/33829518
复制相似问题