AVAsset is an abstract class in AVFoundation that represents a multimedia resource, such as audio or video.
AVURLAsset is a concrete subclass of AVAsset, initialized with a URL.
AVMutableComposition combines media data; it can be viewed as a collection of tracks and is used to compose audio and video.
AVMutableCompositionTrack represents a single track, carrying the media type, track identifier, and so on; track segments can be inserted, removed, and scaled.
AVAssetTrack represents a source media track.
AVAssetExportSession transcodes an AVAsset source object and exports it in a preconfigured format.
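A minimal sketch of how these pieces fit together (the file path and function name here are illustrative, not from the project below): load an AVURLAsset, copy its video track into an AVMutableComposition, then transcode with an AVAssetExportSession.
import AVFoundation
func assetPipelineSketch() {
// hypothetical source file, for illustration only
let url = URL(fileURLWithPath: NSHomeDirectory() + "/Documents/sample.mov")
let asset = AVURLAsset(url: url) // a concrete AVAsset backed by a URL
let videoTracks = asset.tracks(withMediaType: .video) // [AVAssetTrack]
print("video tracks: \(videoTracks.count), duration: \(CMTimeGetSeconds(asset.duration))s")
let composition = AVMutableComposition() // an editable collection of tracks
guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let sourceTrack = videoTracks.first else { return }
// insert the whole source track at time zero
try? compositionTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: sourceTrack, at: .zero)
// AVAssetExportSession transcodes the composition to a preset format
guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetMediumQuality) else { return }
exporter.outputURL = URL(fileURLWithPath: NSTemporaryDirectory() + "out.mov")
exporter.outputFileType = .mov
exporter.exportAsynchronously {
print("export status: \(exporter.status.rawValue)")
}
}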
1.***************** Video composition ****************
Reference: http://www.hangge.com/blog/cache/detail_1184.html
/**
Custom camera: taking photos, recording video, switching front/back camera, toggling the flash, saving a single clip straight to the album, and merging several clips before saving to the system album (the merge path still has an issue; the Objective-C version works).
1. Front and back camera positions
typedef NS_ENUM(NSInteger, AVCaptureDevicePosition) {
AVCaptureDevicePositionUnspecified = 0,
AVCaptureDevicePositionBack = 1,
AVCaptureDevicePositionFront = 2
} NS_AVAILABLE(10_7, 4_0);
2. Flash modes
typedef NS_ENUM(NSInteger, AVCaptureFlashMode) {
AVCaptureFlashModeOff = 0,
AVCaptureFlashModeOn = 1,
AVCaptureFlashModeAuto = 2
} NS_AVAILABLE(10_7, 4_0);
3. Torch modes (in fact the camera's flash LED)
typedef NS_ENUM(NSInteger, AVCaptureTorchMode) {
AVCaptureTorchModeOff = 0,
AVCaptureTorchModeOn = 1,
AVCaptureTorchModeAuto = 2,
} NS_AVAILABLE(10_7, 4_0);
4. Focus modes
typedef NS_ENUM(NSInteger, AVCaptureFocusMode) {
AVCaptureFocusModeLocked = 0,
AVCaptureFocusModeAutoFocus = 1,
AVCaptureFocusModeContinuousAutoFocus = 2,
} NS_AVAILABLE(10_7, 4_0);
5. Exposure modes
typedef NS_ENUM(NSInteger, AVCaptureExposureMode) {
AVCaptureExposureModeLocked = 0,
AVCaptureExposureModeAutoExpose = 1,
AVCaptureExposureModeContinuousAutoExposure = 2,
AVCaptureExposureModeCustom NS_ENUM_AVAILABLE_IOS(8_0) = 3,
} NS_AVAILABLE(10_7, 4_0);
6. White balance modes
typedef NS_ENUM(NSInteger, AVCaptureWhiteBalanceMode) {
AVCaptureWhiteBalanceModeLocked = 0,
AVCaptureWhiteBalanceModeAutoWhiteBalance = 1,
AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance = 2,
} NS_AVAILABLE(10_7, 4_0);
7. Auto-focus range restriction
typedef NS_ENUM(NSInteger, AVCaptureAutoFocusRangeRestriction) {
AVCaptureAutoFocusRangeRestrictionNone = 0,
AVCaptureAutoFocusRangeRestrictionNear = 1,
AVCaptureAutoFocusRangeRestrictionFar = 2,
} NS_AVAILABLE_IOS(7_0);
Note: fetching the audio track and saving the merged video to the album still has an issue here.
*/
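All of the modes listed above are applied the same way: lock the device for configuration, check that the mode is supported, set it, then unlock. A minimal sketch, assuming `device` is an AVCaptureDevice you already hold (the function name is illustrative):
func applyDefaultCaptureModes(to device: AVCaptureDevice) {
do {
try device.lockForConfiguration() // required before changing any of the modes above
if device.isFocusModeSupported(.continuousAutoFocus) {
device.focusMode = .continuousAutoFocus
}
if device.isExposureModeSupported(.continuousAutoExposure) {
device.exposureMode = .continuousAutoExposure
}
if device.isWhiteBalanceModeSupported(.continuousAutoWhiteBalance) {
device.whiteBalanceMode = .continuousAutoWhiteBalance
}
if device.hasTorch, device.isTorchModeSupported(.auto) {
device.torchMode = .auto
}
device.unlockForConfiguration()
} catch {
print("lockForConfiguration failed: \(error)")
}
}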
import UIKit
import AVFoundation
import Photos
import AVKit
@available(iOS 11.0, *)
class LYBAutoDefineCameraVC: UIViewController, AVCapturePhotoCaptureDelegate, AVCaptureFileOutputRecordingDelegate {
var device:AVCaptureDevice! // capture device, e.g. the camera
var input:AVCaptureDeviceInput! // input stream
var photoOutput:AVCapturePhotoOutput! // photo output (AVCaptureStillImageOutput before iOS 10, now deprecated)
var movieoutput:AVCaptureMovieFileOutput! // movie-recording output
var session:AVCaptureSession! // session coordinating the data flow from input to output; the bridge between them
var previewLayer:AVCaptureVideoPreviewLayer! // preview layer showing the captured image in real time
var setting:AVCapturePhotoSettings? // photo capture settings
var photoButton: UIButton? // shutter button
var imageView: UIImageView? // view for the captured still image
var image: UIImage? // the captured still image
var isJurisdiction: Bool? // whether camera permission was granted
var flashBtn:UIButton? // flash button
var videoConnection: AVCaptureConnection? // capture connection
var isflash:Bool=false // flash on/off state
// All recorded video clips
var videoAssets = [AVAsset]()
// URLs of all recorded clips
var assetURLs = [String]()
// Index of the current clip
var appendix: Int32 = 1
// Maximum allowed recording time (seconds)
let totalSeconds: Float64 = 15.00
// Frames per second
var framesPerSecond:Int32 = 30
// Remaining time
var remainingTime : TimeInterval = 15.0
// Whether recording has stopped
var stopRecording: Bool = false
// Remaining-time timer
var timer: Timer?
// Progress-bar timer
var progressBarTimer: Timer?
// Progress-bar timer interval
var incInterval: TimeInterval = 0.05
// Progress bar
var progressBar: UIView = UIView()
// Current right edge of the progress bar
var oldX: CGFloat = 0
// Record and save buttons
var recordButton, saveButton : UIButton!
// URL of the merged video
var outputURL: NSURL?
override func viewDidLoad() {
super.viewDidLoad()
customCamera() // set up the custom camera
customUI() // add the custom camera buttons
}
// // MARK: - Check photo-library permission
// func canUseCamera() -> Bool {
// // request photo-library authorization
// PHPhotoLibrary.requestAuthorization({ (status) in
// switch status {
// case .notDetermined:
//
// break
// case .restricted: // this app is not authorized to access photo data
// break
// case .denied: // the user explicitly denied this app access to photo data
// break
// case .authorized: // already authorized
// break
// default:
// break
// }
// return true
// }
//MARK: Set up the custom camera
func customCamera(){
// Pick the back camera
let devices = AVCaptureDevice.devices(for: AVMediaType.video)
guard let devic = devices.filter({ return $0.position == .back }).first else { return }
device = devic
// Photo output settings
setting = AVCapturePhotoSettings.init(format: [AVVideoCodecKey:AVVideoCodecType.jpeg])
// Initialize the input with the capture device
self.input = try? AVCaptureDeviceInput(device: device)
// Photo output
self.photoOutput = AVCapturePhotoOutput.init()
// Movie output
self.movieoutput = AVCaptureMovieFileOutput.init()
// Create the session
self.session = AVCaptureSession.init()
// Output quality preset
if(self.session.canSetSessionPreset(.hd1280x720)){
self.session.sessionPreset = .hd1280x720
}
// Add the camera input to the session
if(self.session.canAddInput(self.input)){
self.session.addInput(self.input)
}
// Add an audio input device
let audioCaptureDevice = AVCaptureDevice.devices(for: AVMediaType.audio).first
let audioInput = try? AVCaptureDeviceInput.init(device: audioCaptureDevice!)
if(self.session.canAddInput(audioInput!)){
self.session.addInput(audioInput!)
}
// Add the photo output to the session
if(self.session.canAddOutput(self.photoOutput)){
self.session.addOutput(self.photoOutput)
}
// Add the movie output to the session
if(self.session.canAddOutput(self.movieoutput)){
self.session.addOutput(self.movieoutput)
}
// Initialize the preview layer with self.session; the session drives the input's capture, the layer renders the image
self.previewLayer = AVCaptureVideoPreviewLayer.init(session: self.session)
self.previewLayer.frame = CGRect.init(x: 0, y: 0, width: WIDTH, height: HEIGHT)
self.previewLayer.videoGravity = .resizeAspectFill
self.view.layer.addSublayer(self.previewLayer)
// // automatic white balance
// if device.isWhiteBalanceModeSupported(.autoWhiteBalance) {
// device.whiteBalanceMode = .autoWhiteBalance
// }
// device.unlockForConfiguration() // unlock
// }
setupButton()
// Start the session
self.session.startRunning()
// Add the progress bar
progressBar.frame = CGRect(x: 0, y: 0, width: self.view.bounds.width,
height: self.view.bounds.height * 0.1)
progressBar.backgroundColor = UIColor(red: 4/255, green: 3/255, blue: 3/255, alpha: 0.5)
self.view.addSubview(progressBar)
}
//MARK: Add the custom buttons and other UI
func customUI(){
// Front/back camera switch
let changeBtn = UIButton.init()
changeBtn.frame = CGRect.init(x: Int(WIDTH - 50), y:TopSpaceHigh, width: 40, height: 40)
changeBtn.setImage(UIImage.init(named: "btn_list_takephotos"), for: UIControl.State.normal)
changeBtn.addTarget(self, action: #selector(changeCamera), for: .touchUpInside)
view.addSubview(changeBtn)
// Shutter button
photoButton = UIButton(type: .custom)
photoButton?.frame = CGRect(x: WIDTH * 1 / 2.0 - 30, y: HEIGHT - 100, width: 60, height: 60)
photoButton?.setImage(UIImage(named: "icon_shot_sel"), for: .normal)
photoButton?.addTarget(self, action: #selector(shutterCamera), for: .touchUpInside)
view.addSubview(photoButton!)
// Flash button
flashBtn = UIButton.init()
flashBtn?.frame = CGRect.init(x: 10, y:TopSpaceHigh, width: 40, height: 40)
flashBtn?.addTarget(self, action: #selector(flashAction), for: .touchUpInside)
flashBtn?.setImage(UIImage.init(named: "icon_flash on_sel"), for: UIControl.State.normal)
view.addSubview(flashBtn!)
// Cancel button
let cancelBtn = UIButton.init()
cancelBtn.frame = CGRect.init(x: 10, y: HEIGHT - 100, width: 60, height: 60)
cancelBtn.setTitle("Cancel", for: .normal)
cancelBtn.addTarget(self, action: #selector(cancelAction), for: .touchUpInside)
view.addSubview(cancelBtn)
}
//MARK: Switch between front and back camera
@objc func changeCamera(){
// 1. Get the current camera position and flip it
guard var position = input?.device.position else { return }
position = position == .front ? .back : .front
// 2. Create a new capture device for that position
let devices = AVCaptureDevice.devices(for: AVMediaType.video)
for devic in devices{
if devic.position == position{
device = devic
}
}
// New input
guard let videoInput = try? AVCaptureDeviceInput(device: device!) else { return }
// 3. Wrap session changes in beginConfiguration/commitConfiguration
session.beginConfiguration()
// Remove the old input
session.removeInput(input)
// Add the new input to the session
if(self.session.canAddInput(videoInput)){
self.session.addInput(videoInput)
}
// Commit the configuration
session.commitConfiguration()
// Keep a reference to the new input
self.input = videoInput
}
//MARK: Shutter button tap
@objc func shutterCamera(){
// Take the photo
videoConnection = photoOutput.connection(with: AVMediaType.video)
if videoConnection == nil {
print("take photo failed!")
return
}
photoOutput.capturePhoto(with: setting!, delegate: self)
}
//MARK: Flash (torch) toggle
@objc func flashAction(){
isflash = !isflash // flip the flash state
try? device.lockForConfiguration()
if(isflash){ // turn on
// Torch method 1: level ranges from 0 to 1
// guard ((try? device.setTorchModeOn(level: 0.5)) != nil) else {
// print("failed to turn the torch on")
// return
// }
device.torchMode = .on // torch method 2
}else{ // turn off
if device.hasTorch {
device.torchMode = .off // turn the torch off
}
}
device.unlockForConfiguration()
}
//MARK: Cancel button
@objc func cancelAction(){
self.imageView?.removeFromSuperview()
if(!session.isRunning){
session.startRunning()
}
}
// Photo capture completion (new in iOS 10+)
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
session.stopRunning() // stop the session
let data = photo.fileDataRepresentation()
image = UIImage.init(data: data!)
print("\(photo.metadata)")
}
// // Photo capture completion (pre-iOS 10)
// func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
// session.stopRunning() // stop the session
// let imagedata = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer!, previewPhotoSampleBuffer: previewPhotoSampleBuffer)
// let image=UIImage.init(data: imagedata!)
// // let imageV=UIImageView.init(frame: CGRect.init(x: 0, y:120, width: 100, height: 100))
// // imageV.image=image
// // view.addSubview(imageV)
//
// }
// Create the record/save/playback buttons
func setupButton(){
// Record button
self.recordButton = UIButton(frame: CGRect.init(x: 0, y: 0, width: 120, height: 50))
self.recordButton.backgroundColor = UIColor.red
self.recordButton.layer.masksToBounds = true
self.recordButton.setTitle("Hold to record", for: UIControl.State.normal)
self.recordButton.layer.cornerRadius = 20.0
self.recordButton.layer.position = CGPoint(x: Int(self.view.bounds.width/2),
y:Int(self.view.bounds.height)-Int(bottomSafeHeight)-150)
self.recordButton.addTarget(self, action: #selector(onTouchDownRecordButton),
for: .touchDown)
self.recordButton.addTarget(self, action: #selector(onTouchUpRecordButton),
for: .touchUpInside)
// Save button
self.saveButton = UIButton(frame: CGRect.init(x: 0, y: 0, width: 70, height: 50))
self.saveButton.backgroundColor = UIColor.gray
self.saveButton.layer.masksToBounds = true
self.saveButton.setTitle("Save", for: UIControl.State.normal)
self.saveButton.layer.cornerRadius = 20.0
self.saveButton.layer.position = CGPoint(x: Int(self.view.bounds.width) - 60,
y:Int(self.view.bounds.height)-Int(bottomSafeHeight)-150)
self.saveButton.addTarget(self, action: #selector(onClickStopButton),
for: .touchUpInside)
// Playback button
let backlookBtn:UIButton = UIButton.init(frame: CGRect.init(x: 100, y: 200, width: 100, height: 50))
backlookBtn.setTitle("Playback", for: UIControl.State.normal)
backlookBtn.addTarget(self, action: #selector(reviewRecord), for: .touchUpInside)
view.addSubview(backlookBtn)
// Add the buttons to the view
self.view.addSubview(self.recordButton)
self.view.addSubview(self.saveButton)
}
// Press the record button: start recording a clip
@objc func onTouchDownRecordButton(sender: UIButton){
if(!stopRecording) {
let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory,
.userDomainMask, true)
let documentsDirectory = paths[0] as String
let outputFilePath = "\(documentsDirectory)/output-\(appendix).mov"
appendix += 1
let outputURL = NSURL(fileURLWithPath: outputFilePath)
let fileManager = FileManager.default
if(fileManager.fileExists(atPath: outputFilePath)) {
do {
try fileManager.removeItem(atPath: outputFilePath)
} catch _ {
}
}
print("start recording: \(outputFilePath)")
movieoutput.startRecording(to: outputURL as URL, recordingDelegate: self)
}
}
// Release the record button: stop recording the clip
@objc func onTouchUpRecordButton(sender: UIButton){
if(!stopRecording) {
timer?.invalidate()
progressBarTimer?.invalidate()
movieoutput.stopRecording()
}
}
// Delegate: recording started (the old captureOutput(_:didStartRecordingToOutputFileAtURL:fromConnections:) signature is never called in Swift 4)
func fileOutput(_ output: AVCaptureFileOutput, didStartRecordingTo fileURL: URL, from connections: [AVCaptureConnection]) {
startProgressBarTimer()
startTimer()
}
// Delegate: recording finished
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
// // Save the single clip directly to the album:
// saveVideoDirectlyToAlbum(outputFileURL: outputFileURL)
//****** Below: collect the clips so they can be merged later
let asset : AVURLAsset = AVURLAsset(url: outputFileURL as URL, options: nil)
let duration : TimeInterval = CMTimeGetSeconds(asset.duration)
print("recorded clip: \(asset)---\(outputFileURL)")
videoAssets.append(asset) // all recorded clips
assetURLs.append(outputFileURL.path) // store the clip URL
remainingTime = remainingTime - duration
// When the maximum recording time is reached, merge automatically
if remainingTime <= 0 {
mergeVideos() // merge the clips
}
}
// Remaining-time timer
func startTimer() {
timer = Timer(timeInterval: remainingTime, target: self,
selector: #selector(timeout), userInfo: nil,
repeats:true)
RunLoop.current.add(timer!, forMode: RunLoop.Mode.default)
}
// Maximum recording time reached
@objc func timeout() {
stopRecording = true
print("time is up")
movieoutput.stopRecording()
timer?.invalidate()
progressBarTimer?.invalidate()
}
// Progress-bar timer
func startProgressBarTimer() {
progressBarTimer = Timer(timeInterval: incInterval, target: self,
selector: #selector(progress),
userInfo: nil, repeats: true)
RunLoop.current.add(progressBarTimer!,
forMode: RunLoop.Mode.default)
}
// Advance the progress bar
@objc func progress() {
let progressProportion: CGFloat = CGFloat(incInterval / totalSeconds)
let progressInc: UIView = UIView()
progressInc.backgroundColor = UIColor(red: 55/255, green: 186/255, blue: 89/255,
alpha: 1)
let newWidth = progressBar.frame.width * progressProportion
progressInc.frame = CGRect(x: oldX , y: 0, width: newWidth,
height: progressBar.frame.height)
oldX = oldX + newWidth
progressBar.addSubview(progressInc)
}
// Save button tap
@objc func onClickStopButton(sender: UIButton){
mergeVideos()
}
// Merge the recorded clips
func mergeVideos() {
let duration = totalSeconds
let composition = AVMutableComposition()
// Composition tracks for video and audio
let firstTrack = composition.addMutableTrack(
withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid) // the composition's video track
let audioTrack = composition.addMutableTrack(
withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid) // the composition's audio track
var insertTime: CMTime = CMTime.zero
for asset in videoAssets {
print("merging clip: \(asset)")
let videoArr = asset.tracks(withMediaType: AVMediaType.video) // video tracks of the source asset
do {
try firstTrack!.insertTimeRange(
CMTimeRangeMake(start: CMTime.zero, duration: asset.duration),
of: videoArr[0] ,
at: insertTime)
} catch _ {
}
let audioArr = asset.tracks(withMediaType: AVMediaType.audio) // audio tracks of the source asset
print("\(audioArr.count)-----\(videoArr.count)")
do {
// try audioTrack!.insertTimeRange(
// CMTimeRangeMake(start: CMTime.zero, duration: asset.duration),
// of: audioArr[0],
// at: insertTime)
} catch _ {
}
insertTime = CMTimeAdd(insertTime, asset.duration)
}
// Rotate the video so it is not turned 90 degrees
firstTrack!.preferredTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi/2))
// Define the final render size (square here)
print("original size:", firstTrack!.naturalSize)
let renderSize = CGSize.init(width: firstTrack!.naturalSize.height, height: firstTrack!.naturalSize.height)
print("render size:", renderSize)
// Crop the video with AVMutableVideoComposition (square, centered)
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: framesPerSecond)
videoComposition.renderSize = renderSize
// Video composition instruction
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(
start: CMTime.zero,duration: CMTimeMakeWithSeconds(Float64(duration), preferredTimescale: framesPerSecond))
let transformer: AVMutableVideoCompositionLayerInstruction =
AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack!)
// let t1 = CGAffineTransform(translationX: firstTrack!.naturalSize.height,
// y: -(firstTrack!.naturalSize.width-firstTrack!.naturalSize.height)/2)
// let t2 = CGAffineTransform.init(rotationAngle: CGFloat(Double.pi/2))
// let finalTransform: CGAffineTransform = t2
// transformer.setTransform(finalTransform, at: CMTime.zero)
instruction.layerInstructions = [transformer] // layer instructions
videoComposition.instructions = [instruction]
// Path of the merged video
let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory,
.userDomainMask,true)[0]
let destinationPath = documentsPath + "/mergeVideo-\(arc4random()%1000).mov"
print("merged video path: \(destinationPath)")
let videoPath: NSURL = NSURL(fileURLWithPath: destinationPath as String)
let exporter = AVAssetExportSession(asset: composition,
presetName:AVAssetExportPresetHighestQuality)!
exporter.outputURL = videoPath as URL
exporter.outputFileType = AVFileType.mov
exporter.videoComposition = videoComposition // apply the videoComposition
exporter.shouldOptimizeForNetworkUse = true
exporter.timeRange = CMTimeRangeMake(
start: CMTime.zero,duration: CMTimeMakeWithSeconds(Float64(duration), preferredTimescale: framesPerSecond))
exporter.exportAsynchronously(completionHandler: {
print("export status \(exporter.status.rawValue)")
// Save the merged video to the album
self.exportDidFinish(session: exporter)
})
}
// Save the merged video to the album
func exportDidFinish(session: AVAssetExportSession) {
print("video merge finished!")
weak var weakSelf = self
outputURL = session.outputURL! as NSURL
// Save the recording to the photo library
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: weakSelf!.outputURL! as URL) // save to the system album
}, completionHandler:{(isSuccess: Bool, error: Error?) in
DispatchQueue.main.async {
// Reset state and delete all the clip files
self.reset()
// Show an alert
let alertController = UIAlertController(title: "Video saved",
message: "Play it back?", preferredStyle: .alert)
let okAction = UIAlertAction(title: "OK", style: .default, handler: {
action in
// Playback
weakSelf?.reviewRecord()
})
let cancelAction = UIAlertAction(title: "Cancel", style: .cancel,
handler: nil)
alertController.addAction(okAction)
alertController.addAction(cancelAction)
self.present(alertController, animated: true,
completion: nil)
}
}
)
}
// After a successful save, reset all state for the next recording
func reset() {
// Delete the clip files
for assetURL in assetURLs {
if(FileManager.default.fileExists(atPath: assetURL)) {
do {
try FileManager.default.removeItem(atPath: assetURL)
} catch _ {
}
print("deleted clip: \(assetURL)")
}
}
// Reset the progress bar
let subviews = progressBar.subviews
for subview in subviews {
subview.removeFromSuperview()
}
// Reset the remaining state
videoAssets.removeAll(keepingCapacity: false)
assetURLs.removeAll(keepingCapacity: false)
appendix = 1
oldX = 0
stopRecording = false
remainingTime = totalSeconds
}
// Playback of the recording
@objc func reviewRecord() {
print("playback---\(String(describing: outputURL))")
// Two ways to play video:
// AVPlayer: custom UI; most third-party players are built on top of it.
// AVPlayerViewController: a wrapper around AVPlayer; it can be presented directly as a view controller or embedded as a view, but its UI cannot be customized.
//******** Playing video via AVPlayer
// Way 1: create the player directly from the URL; the item cannot be configured
// let player = AVPlayer(url: outputURL as URL)
// // Create the media item
// let palyerItem:AVPlayerItem = AVPlayerItem(url: outputURL! as URL)
// // Way 2: create the AVPlayer from an AVPlayerItem; more flexible, the item can be configured
// let player:AVPlayer = AVPlayer.init(playerItem: palyerItem)
// player.rate = 1.0 // playback speed; set it before playing
// // Layer that displays the video
// let playerLayer = AVPlayerLayer.init(player: player)
// playerLayer.videoGravity = .resizeAspect
// playerLayer.frame = CGRect.init(x: 100, y: 100, width: 200, height: 200)
// playerLayer.backgroundColor=UIColor.blue.cgColor
// self.view.layer.addSublayer(playerLayer)
// // Play
// player.play()
// Create a player from the local file URL and present it
let player = AVPlayer(url: outputURL! as URL)
let playerViewController:AVPlayerViewController = AVPlayerViewController()
playerViewController.player = player
self.present(playerViewController, animated: true) {
playerViewController.player!.play()
}
}
// Save a single recording directly to the photo library, without merging
func saveVideoDirectlyToAlbum(outputFileURL: URL){
weak var weakSelf = self
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputFileURL as URL) // save to the system album
}, completionHandler:{(isSuccess: Bool, error: Error?) in
DispatchQueue.main.async {
// Reset state and delete all the clip files
weakSelf!.reset()
// Show an alert
let alertController = UIAlertController(title: "Video saved",
message: "Play it back?", preferredStyle: .alert)
let okAction = UIAlertAction(title: "OK", style: .default, handler: {
action in
})
let cancelAction = UIAlertAction(title: "Cancel", style: .cancel,
handler: nil)
alertController.addAction(okAction)
alertController.addAction(cancelAction)
self.present(alertController, animated: true,
completion: nil)
}
}
)
}
}
2.************ Video snapshot **************
/// Capture a thumbnail from a video URL at a given time
static func thumbnailImageForVideo(videoURL: URL, time: TimeInterval) -> UIImage? {
let asset = AVURLAsset.init(url: videoURL, options: nil)
let assetImageGenerator = AVAssetImageGenerator(asset: asset)
assetImageGenerator.appliesPreferredTrackTransform = true
assetImageGenerator.apertureMode = AVAssetImageGenerator.ApertureMode.encodedPixels
let thumbnailCGImage: CGImage?
var thumbnailImage: UIImage?
do {
// CMTimeMakeWithSeconds keeps the requested time in seconds; the original
// CMTimeMake(value: Int64(time), timescale: 60) asked for time/60 seconds
thumbnailCGImage = try assetImageGenerator.copyCGImage(at: CMTimeMakeWithSeconds(time, preferredTimescale: 60), actualTime: nil)
if let cgImage = thumbnailCGImage {
thumbnailImage = UIImage(cgImage: cgImage)
}
} catch {
}
return thumbnailImage
}
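A hypothetical call site for the helper above, assuming it lives in a utility type named VideoTool and that a clip exists at the given path (both assumptions are mine):
// hypothetical usage: grab the frame three seconds into a local clip
let clipURL = URL(fileURLWithPath: NSHomeDirectory() + "/Documents/sample.mov") // assumed path
if let thumb = VideoTool.thumbnailImageForVideo(videoURL: clipURL, time: 3.0) {
print("thumbnail size: \(thumb.size)") // e.g. assign it to an image view instead
}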
3.********** Video compression *********
// Video compression is simply exporting at a lower-quality preset
func compressVideo(){
let originPath = NSHomeDirectory() + "/video/001.mov"
// Compress by re-exporting
let asset:AVAsset = AVAsset.init(url: URL.init(fileURLWithPath: originPath))
// Create the export session
/**
NSString *const AVAssetExportPresetLowQuality; // low quality
NSString *const AVAssetExportPresetMediumQuality;
NSString *const AVAssetExportPresetHighestQuality; // high quality
*/
let session:AVAssetExportSession = AVAssetExportSession.init(asset: asset, presetName: AVAssetExportPresetLowQuality)! // set the export quality
// Output URL (must differ from the source file; the original wrote back to the same path)
let resultPath = NSHomeDirectory() + "/video/001-compressed.mov"
session.outputURL = URL.init(fileURLWithPath: resultPath)
// The output file type must be set
session.outputFileType = AVFileType.mov
// Export the video
session.exportAsynchronously {
if session.status == .completed {
print("video export finished")
} else {
print("video export failed: \(String(describing: session.error))")
}
}
}
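Not every preset is valid for every asset. A small sketch (the path is an assumption) that asks AVAssetExportSession which presets are actually compatible before creating the session:
let sourceAsset = AVAsset(url: URL(fileURLWithPath: NSHomeDirectory() + "/video/001.mov")) // assumed path
// exportPresets(compatibleWith:) returns the preset names valid for this asset
let presets = AVAssetExportSession.exportPresets(compatibleWith: sourceAsset)
print("compatible presets: \(presets)")
if presets.contains(AVAssetExportPresetLowQuality) {
// safe to create the export session with the low-quality preset
}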
======== Audio concatenation and mixing =======
Objective-C reference: https://cn.aliyun.com/jiaocheng/384681.html
Swift reference: http://blog.csdn.net/xuzenghuifeng/article/details/53420841
func combineAudio(){
//******** URLs of the local audio files
let filePath:String = Bundle.main.path(forResource: "tesad", ofType: "mp3")!
let url = URL.init(fileURLWithPath:filePath)
let filePath2:String = Bundle.main.path(forResource: "tesad", ofType: "mp3")!
let url2 = URL.init(fileURLWithPath:filePath2)
// Turn the source files into processable assets; think of this as a first pass
let originalAsset:AVURLAsset = AVURLAsset.init(url: url)
let originalAsset2:AVURLAsset = AVURLAsset.init(url: url2)
//****** Step one of composing audio: create an AVMutableComposition. As with many APIs carrying the "composition" suffix, it is the work-in-progress mix, not the final file
let composition:AVMutableComposition = AVMutableComposition() // can be viewed as a collection of tracks
// Create the composition's audio tracks; these are the raw material the mix is built from, and all track editing goes through them. For audio composition the media type is AVMediaType.audio
let appendedAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)!
let appendedAudioTrack2:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)!
// Extract the source audio tracks to splice from; tracks(withMediaType:) returns an [AVAssetTrack], and we take the first (often the only) one
let assetTrack1:AVAssetTrack = originalAsset.tracks(withMediaType: AVMediaType.audio).first!
let assetTrack2:AVAssetTrack = originalAsset2.tracks(withMediaType: AVMediaType.audio).first!
// Time ranges to insert
let timeRange1 = CMTimeRangeMake(start: CMTime.zero, duration: originalAsset.duration)
let timeRange2 = CMTimeRangeMake(start: CMTime.zero, duration: originalAsset2.duration)
//****** Merge the audio by inserting the source tracks. For concatenation, choose insertion times so the clips follow one another; multiple asset tracks can be appended to the same composition track
try! appendedAudioTrack.insertTimeRange(timeRange1, of: assetTrack1, at: CMTime.zero)
try! appendedAudioTrack2.insertTimeRange(timeRange2, of: assetTrack2, at: CMTime.zero) // the insertion time here can be changed, but it must be a CMTime; if both use CMTime.zero the two tracks overlap and play as a mix
//******** Export the combined audio file
let exportSession:AVAssetExportSession = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetAppleM4A)!
// The composition above, plus the properties below, produces the target file at the given path
let realPath = NSHomeDirectory() + "/record/total.m4a" // target path
try? FileManager.default.createDirectory(atPath: NSHomeDirectory() + "/record", withIntermediateDirectories: true, attributes: nil) // the /record directory is not created automatically
exportSession.outputURL = URL(fileURLWithPath: realPath)
exportSession.outputFileType = AVFileType.m4a
exportSession.exportAsynchronously(completionHandler: {() -> Void in
print("exportSession...",exportSession)
switch exportSession.status {
case .failed: break
case .completed: break
case .waiting: break
default:break
}
})
}