I am recording 3 videos and merging them into one, and I need text shown on all of the videos. I tried to do this, but without success: the video comes out black, while the audio is fine. When I do the merge without the CALayer, everything works. Is something wrong with my code, or am I doing this incorrectly? Please guide me.
private func doMerge(arrayVideos: [AVAsset], animation: Bool, completion: @escaping Completion) -> Void {
    var insertTime = CMTime.zero
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    var outputSize = CGSize.init(width: 0, height: 0)

    // Determine video output size
    for videoAsset in arrayVideos {
        let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
        let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)

        var videoSize = videoTrack.naturalSize
        if assetInfo.isPortrait == true {
            videoSize.width = videoTrack.naturalSize.height
            videoSize.height = videoTrack.naturalSize.width
        }

        if videoSize.height > outputSize.height {
            outputSize = videoSize
        }
    }

    if outputSize.width == 0 || outputSize.height == 0 {
        outputSize = defaultSize
    }

    // Silent sound (in case a video has no audio track)
    // let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3")
    // let silenceAsset = AVAsset(url: silenceURL!)
    // let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first

    // Init composition
    let mixComposition = AVMutableComposition.init()

    for videoAsset in arrayVideos {
        // Get video track
        guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }

        // Get audio track
        var audioTrack: AVAssetTrack?
        if videoAsset.tracks(withMediaType: AVMediaType.audio).count > 0 {
            audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first
        }
        else {
            // audioTrack = silenceSoundTrack
        }

        // Init video & audio composition track
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
                                                                   preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
                                                                   preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

        do {
            let startTime = CMTime.zero
            let duration = videoAsset.duration

            // Add video track to video composition at specific time
            try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
                                                       of: videoTrack,
                                                       at: insertTime)

            // Add audio track to audio composition at specific time
            if let audioTrack = audioTrack {
                try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
                                                           of: audioTrack,
                                                           at: insertTime)
            }

            // Add instruction for video track
            let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
                                                                       asset: videoAsset,
                                                                       standardSize: outputSize,
                                                                       atTime: insertTime)

            // Hide this video track before switching to the next one
            let endTime = CMTimeAdd(insertTime, duration)

            if animation {
                let timeScale = videoAsset.duration.timescale
                let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
                layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
            }
            else {
                layerInstruction.setOpacity(0, at: endTime)
            }

            arrayLayerInstructions.append(layerInstruction)

            // Increase the insert time
            insertTime = CMTimeAdd(insertTime, duration)
        }
        catch {
            print("Load track error: \(error)")
        }

        // Watermark effect
        let size = videoTrack.naturalSize

        // Create text layer
        let titleLayer = CATextLayer()
        titleLayer.backgroundColor = UIColor.clear.cgColor
        titleLayer.contentsScale = UIScreen.main.scale
        titleLayer.string = "Dummy text"
        titleLayer.foregroundColor = UIColor.white.cgColor
        titleLayer.font = UIFont(name: "Helvetica", size: 28)
        titleLayer.shadowOpacity = 0.5
        titleLayer.alignmentMode = CATextLayerAlignmentMode.center
        titleLayer.frame = CGRect(x: 0, y: 50, width: size.width, height: size.height)

        let videolayer = CALayer()
        videolayer.backgroundColor = UIColor.clear.cgColor
        // videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        // let layercomposition = AVMutableVideoComposition()
        // layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
        // layercomposition.renderSize = size

        mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: titleLayer)
    }

    // Main video composition instruction
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
    mainInstruction.layerInstructions = arrayLayerInstructions

    // Main video composition
    // mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = outputSize

    // Export to file
    let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
    let exportURL = URL.init(fileURLWithPath: path)

    // Remove the file if it already exists
    FileManager.default.removeItemIfExisted(exportURL)

    // Init exporter
    let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = exportURL
    exporter?.outputFileType = AVFileType.mp4
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.videoComposition = mainComposition

    // Do export
    exporter?.exportAsynchronously(completionHandler: {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
        }
    })
}
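For reference, the code above also calls helpers that aren't shown: `orientationFromTransform` and `FileManager.removeItemIfExisted` (plus `videoCompositionInstructionForTrack`, which scales and positions each track and is omitted here). A minimal sketch of what the first two are assumed to look like, following the common merge-videos tutorial pattern — the actual implementations may differ:

import UIKit
import AVFoundation

// Assumed helper: classifies a track's preferredTransform into an
// orientation plus a portrait/landscape flag.
func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
    var assetOrientation = UIImage.Orientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        assetOrientation = .right    // rotated 90° clockwise: portrait
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        assetOrientation = .left     // rotated 90° counter-clockwise: portrait
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        assetOrientation = .up       // identity: landscape
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        assetOrientation = .down     // rotated 180°: landscape
    }
    return (assetOrientation, isPortrait)
}

// Assumed helper: deletes the file at the given URL if one exists.
extension FileManager {
    func removeItemIfExisted(_ url: URL) {
        if fileExists(atPath: url.path) {
            try? removeItem(at: url)
        }
    }
}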
You only need to change this part. The exported video comes out black because the animation tool is created with `in: titleLayer`, so the text layer is the root of the rendered layer tree, and the video layer — which never gets a frame and is never added as a sublayer — is never composited at all (the audio is untouched, which is why it still works). The video layer needs an explicit frame and must sit inside a parent layer together with the text layer:
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)

var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
    videoSize.width = videoTrack.naturalSize.height
    videoSize.height = videoTrack.naturalSize.width
}

// let size = videoTrack.naturalSize

// Create text layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.contentsScale = UIScreen.main.scale
titleLayer.string = questions[counter]
counter = counter + 1
titleLayer.foregroundColor = UIColor.black.cgColor
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = CATextLayerAlignmentMode.center
titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)

// The video layer now gets an explicit frame; the red background (which
// overrides the clear color on the previous line) is presumably just there
// to make the layer's frame visible while debugging
let videolayer = CALayer()
videolayer.backgroundColor = UIColor.clear.cgColor
videolayer.backgroundColor = UIColor.red.cgColor
videolayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)

// Both the video layer and the text layer sit inside one parent layer;
// the animation tool renders the video frames into the video layer
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentlayer.addSublayer(videolayer)
parentlayer.addSublayer(titleLayer)

// let layercomposition = AVMutableVideoComposition()
// layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// layercomposition.renderSize = size
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
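One thing to watch: this block still runs inside the per-asset loop, so every iteration overwrites `mainComposition.animationTool`, and only the last `questions[counter]` title is shown for the whole merged video. If each clip should display its own question, one approach is to build one text layer per clip, make it visible only during that clip's time range, add all of them to `parentlayer`, and assign `animationTool` once after the loop. A rough sketch under those assumptions (the `makeCaptionLayer` helper is hypothetical, not part of the original code):

import UIKit
import AVFoundation

// Hypothetical helper: a caption layer that is visible only during
// [start, end) on the merged composition's timeline.
func makeCaptionLayer(text: String, start: CMTime, end: CMTime, size: CGSize) -> CATextLayer {
    let layer = CATextLayer()
    layer.contentsScale = UIScreen.main.scale
    layer.string = text
    layer.font = UIFont(name: "Helvetica", size: 28)
    layer.foregroundColor = UIColor.black.cgColor
    layer.alignmentMode = .center
    layer.frame = CGRect(origin: .zero, size: size)
    layer.opacity = 0 // hidden except while its own clip plays

    let show = CABasicAnimation(keyPath: "opacity")
    show.fromValue = 1.0
    show.toValue = 1.0
    // On a video composition's timeline, time zero must be expressed as
    // AVCoreAnimationBeginTimeAtZero; a beginTime of plain 0 means "now".
    show.beginTime = start.seconds == 0 ? AVCoreAnimationBeginTimeAtZero : start.seconds
    show.duration = (end - start).seconds
    show.isRemovedOnCompletion = false
    layer.add(show, forKey: "showCaption")
    return layer
}

Each clip's start time is the `insertTime` captured before the insert and its end time is `insertTime + duration`, so the caption layers can be collected while merging and attached to `parentlayer` in one pass.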