iOS: Trim an audio file with Swift?

I have to merge an audio file and a recorded voice. For example, the recorded voice is 47 seconds long; I have to cut or trim a 4-minute audio song down to 47 seconds, and then merge the two audio files.

var url: NSURL?
if self.audioRecorder != nil
{
    url = self.audioRecorder!.url
}
else
{
    url = self.soundFileURL!
}
print("playing \(url)")
do
{
    self.newplayer = try AVPlayer(URL: url!)
    let avAsset = AVURLAsset(URL: url!, options: nil)
    print("\(avAsset)")
    let audioDuration = avAsset.duration
    let totalSeconds = CMTimeGetSeconds(audioDuration)
    let hours = floor(totalSeconds / 3600)
    let minutes = floor(totalSeconds % 3600 / 60)
    let seconds = floor(totalSeconds % 3600 % 60)
    print("hours = \(hours), minutes = \(minutes), seconds = \(seconds)")
}

This is the output: // hours = 0.0, minutes = 0.0, seconds = 42.0

For the trim method I just tried the following. How do I set an exact duration, i.e. a start time, an end time, and a new output URL? (A parameterised sketch follows the code below.)

func exportAsset(asset: AVAsset, fileName: String)
{
    let documentsDirectory = NSFileManager.defaultManager().URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)[0]
    let trimmedSoundFileURL = documentsDirectory.URLByAppendingPathComponent(fileName)
    print("saving to \(trimmedSoundFileURL!.absoluteString)")

    let filemanager = NSFileManager.defaultManager()
    // fileExistsAtPath expects a file-system path, not a URL string
    if filemanager.fileExistsAtPath(trimmedSoundFileURL!.path!) {
        print("sound exists")
    }

    let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A)
    exporter!.outputFileType = AVFileTypeAppleM4A
    // the export fails if a file already exists at this URL
    exporter!.outputURL = trimmedSoundFileURL

    let duration = CMTimeGetSeconds(asset.duration)
    if (duration < 5.0) {
        print("sound is not long enough")
        return
    }
    // e.g. the first 5 seconds
    let startTime = CMTimeMake(0, 1)
    let stopTime = CMTimeMake(5, 1)
    let exportTimeRange = CMTimeRangeFromTimeToTime(startTime, stopTime)
    exporter!.timeRange = exportTimeRange

    // do it
    exporter!.exportAsynchronouslyWithCompletionHandler({
        switch exporter!.status {
        case AVAssetExportSessionStatus.Failed:
            print("export failed \(exporter!.error)")
        case AVAssetExportSessionStatus.Cancelled:
            print("export cancelled \(exporter!.error)")
        default:
            print("export complete")
        }
    })
}
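
For reference, here is a minimal sketch of a parameterised trim, assuming the same Swift 2.3-era AVFoundation API used above; the helper name exportTrimmed, its parameter names, and the 600 timescale are illustrative choices, not from the original post:

// Trim `asset` to the window [startSeconds, endSeconds] and write it to `outputURL`.
// The output file must not already exist, or the export fails.
func exportTrimmed(asset: AVAsset, toURL outputURL: NSURL, startSeconds: Double, endSeconds: Double)
{
    guard let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A) else { return }
    exporter.outputFileType = AVFileTypeAppleM4A
    exporter.outputURL = outputURL

    // CMTimeMakeWithSeconds keeps fractional seconds; 600 is a common preferred timescale.
    let start = CMTimeMakeWithSeconds(startSeconds, 600)
    let end = CMTimeMakeWithSeconds(endSeconds, 600)
    exporter.timeRange = CMTimeRangeFromTimeToTime(start, end)

    exporter.exportAsynchronouslyWithCompletionHandler {
        switch exporter.status {
        case .Completed:
            print("trimmed audio saved to \(outputURL)")
        case .Failed, .Cancelled:
            print("export failed \(exporter.error)")
        default:
            break
        }
    }
}

Trimming to, say, the first 47 seconds is then just exportTrimmed(asset: avAsset, toURL: trimmedSoundFileURL!, startSeconds: 0, endSeconds: 47).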

Solution

Finally I found the answer to my question and it works fine. I have attached the code below, with the audio-trimming code added. It should be useful for anyone trying to merge and trim audio (Swift 2.3):

func mixAudio()
{
    let currentTime = CFAbsoluteTimeGetCurrent()
    let composition = AVMutableComposition()
    let compositionAudioTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
    compositionAudioTrack.preferredVolume = 0.8
    let avAsset = AVURLAsset.init(URL: soundFileURL, options: nil)
    print("\(avAsset)")
    var tracks = avAsset.tracksWithMediaType(AVMediaTypeAudio)
    let clipAudioTrack = tracks[0]
    do {
        try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, avAsset.duration), ofTrack: clipAudioTrack, atTime: kCMTimeZero)
    }
    catch _ {
    }

    let compositionAudioTrack1 = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
    compositionAudioTrack.preferredVolume = 0.8

    let avAsset1 = AVURLAsset.init(URL: soundFileURL1)
    print(avAsset1)

    var tracks1 = avAsset1.tracksWithMediaType(AVMediaTypeAudio)
    let clipAudioTrack1 = tracks1[0]
    do {
        try compositionAudioTrack1.insertTimeRange(CMTimeRangeMake(kCMTimeZero, avAsset1.duration), ofTrack: clipAudioTrack1, atTime: kCMTimeZero)
    }
    catch _ {
    }

    var paths = NSSearchPathForDirectoriesInDomains(.LibraryDirectory, .UserDomainMask, true)
    let CachesDirectory = paths[0]
    let strOutputFilePath = CachesDirectory.stringByAppendingString("/Fav")
    print(" strOutputFilePath is \n \(strOutputFilePath)")

    let requiredOutputPath = CachesDirectory.stringByAppendingString("/Fav.m4a")
    print(" requiredOutputPath is \n \(requiredOutputPath)")

    soundFile1 = NSURL.fileURLWithPath(requiredOutputPath)
    print(" Output path is \n \(soundFile1)")

    var audioDuration = avAsset.duration
    var totalSeconds = CMTimeGetSeconds(audioDuration)
    var hours = floor(totalSeconds / 3600)
    var minutes = floor(totalSeconds % 3600 / 60)
    var seconds = Int64(totalSeconds % 3600 % 60)
    print("hours = \(hours), seconds = \(seconds)")

    let recordSettings: [String: AnyObject] = [
        AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
        AVSampleRateKey: 12000,
        AVNumberOfChannelsKey: 1,
        AVEncoderAudioQualityKey: AVAudioQuality.Low.rawValue
    ]
    do {
        audioRecorder = try AVAudioRecorder(URL: soundFile1, settings: recordSettings)
        audioRecorder!.delegate = self
        audioRecorder!.meteringEnabled = true
        audioRecorder!.prepareToRecord()
    }
    catch let error as NSError {
        audioRecorder = nil
        print(error.localizedDescription)
    }

    // The export fails if a file already exists at the output URL, so remove any previous output.
    do {
        try NSFileManager.defaultManager().removeItemAtURL(soundFile1)
    }
    catch _ {
    }

    let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetAppleM4A)
    exporter!.outputURL = soundFile1
    exporter!.outputFileType = AVFileTypeAppleM4A
    let duration = CMTimeGetSeconds(avAsset1.duration)
    print(duration)
    if (duration < 5.0) {
        print("sound is not long enough")
        return
    }
    // trim the merged composition from 0 to `seconds` (the first asset's length in whole seconds)
    let startTime = CMTimeMake(0, 1)
    let stopTime = CMTimeMake(seconds, 1)
    let exportTimeRange = CMTimeRangeFromTimeToTime(startTime, stopTime)
    print(exportTimeRange)
    exporter!.timeRange = exportTimeRange
    print(exporter!.timeRange)

    exporter!.exportAsynchronouslyWithCompletionHandler { () -> Void in
        print(" Output path is \n \(requiredOutputPath)")
        print("export complete: \(CFAbsoluteTimeGetCurrent() - currentTime)")
        var url: NSURL?
        if self.audioRecorder != nil {
            url = self.audioRecorder!.url
        }
        else {
            url = self.soundFile1!
            print(url)
        }

        print("playing \(url)")

        do {
            print(self.soundFile1)
            print(" Output path is \n \(requiredOutputPath)")
            self.setSessionPlayback()
            do {
                self.optData = try NSData(contentsOfURL: self.soundFile1!, options: NSDataReadingOptions.DataReadingMappedIfSafe)
                print(self.optData)
                self.recordencryption = self.optData.base64EncodedStringWithOptions(NSDataBase64EncodingOptions())
                //  print(self.recordencryption)
                self.myImageUploadRequest()
            }

            self.wasteplayer = try AVAudioPlayer(contentsOfURL: self.soundFile1!)
            self.wasteplayer.numberOfLoops = 0
            self.wasteplayer.play()
        }
        catch _ {
        }
    }
}
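
Stripped of the recording and upload glue, the merge-and-trim core of the answer reduces to something like the sketch below (same Swift 2.3-era API; mergeAndTrim, voiceURL, musicURL and outputURL are placeholder names, and the trim here uses the voice asset's exact duration rather than whole seconds):

// Lay both source files on top of each other in one composition, then export only
// the first `voiceAsset.duration` of the mix as an .m4a file.
func mergeAndTrim(voiceURL: NSURL, musicURL: NSURL, outputURL: NSURL, completion: (Bool) -> Void)
{
    let voiceAsset = AVURLAsset(URL: voiceURL)
    let musicAsset = AVURLAsset(URL: musicURL)

    let composition = AVMutableComposition()
    for asset in [voiceAsset, musicAsset] {
        guard let sourceTrack = asset.tracksWithMediaType(AVMediaTypeAudio).first else { continue }
        let track = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
        // Both tracks start at time zero so they play simultaneously.
        _ = try? track.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration), ofTrack: sourceTrack, atTime: kCMTimeZero)
    }

    guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetAppleM4A) else {
        completion(false)
        return
    }
    exporter.outputFileType = AVFileTypeAppleM4A
    exporter.outputURL = outputURL
    // Cut the whole mix down to the length of the recorded voice.
    exporter.timeRange = CMTimeRangeMake(kCMTimeZero, voiceAsset.duration)

    exporter.exportAsynchronouslyWithCompletionHandler {
        // Note: this handler is not called on the main queue.
        completion(exporter.status == .Completed)
    }
}

As in the answer code, make sure nothing already exists at outputURL before exporting, because AVAssetExportSession fails if the output file is already present.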
