
iOS: Re-encoding the Audio and Video of an AVAsset
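The listing below re-encodes an AVAsset with an AVAssetReader/AVAssetWriter pair, and closely follows Apple's AVFoundation reader/writer sample code. It references a number of properties that the post never declares; the interface below is reconstructed from those usages so the code is readable on its own — the property names match the code, but the declaration itself (and the class name) is an assumption, not part of the original post:

@import AVFoundation;

@interface VideoTranscoder : NSObject   // hypothetical class name

// Serial queues: one for overall control flow, one each for the audio and video work.
@property (nonatomic, strong) dispatch_queue_t mainSerializationQueue;
@property (nonatomic, strong) dispatch_queue_t rwAudioSerializationQueue;
@property (nonatomic, strong) dispatch_queue_t rwVideoSerializationQueue;
@property (nonatomic, strong) dispatch_group_t dispatchGroup;

// Source asset, output location, and state flags.
@property (nonatomic, strong) AVAsset *asset;
@property (nonatomic, copy) NSString *path;
@property (nonatomic, strong) NSURL *outputURL;
@property (nonatomic, assign) BOOL cancelled;
@property (nonatomic, assign) BOOL audioFinished;
@property (nonatomic, assign) BOOL videoFinished;

// Reader/writer pair plus per-track outputs and inputs.
@property (nonatomic, strong) AVAssetReader *assetReader;
@property (nonatomic, strong) AVAssetWriter *assetWriter;
@property (nonatomic, strong) AVAssetReaderTrackOutput *assetReaderAudioOutput;
@property (nonatomic, strong) AVAssetReaderTrackOutput *assetReaderVideoOutput;
@property (nonatomic, strong) AVAssetWriterInput *assetWriterAudioInput;
@property (nonatomic, strong) AVAssetWriterInput *assetWriterVideoInput;

@end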

- (void)fk {

    NSString *serializationQueueDescription = [NSString stringWithFormat:@"%@ serialization queue", self];

// Create the main serialization queue.

    self.mainSerializationQueue = dispatch_queue_create([serializationQueueDescription UTF8String], NULL);

    NSString *rwAudioSerializationQueueDescription = [NSString stringWithFormat:@"%@ rw audio serialization queue", self];

// Create the serialization queue to use for reading and writing the audio data.

    self.rwAudioSerializationQueue = dispatch_queue_create([rwAudioSerializationQueueDescription UTF8String], NULL);

    NSString *rwVideoSerializationQueueDescription = [NSString stringWithFormat:@"%@ rw video serialization queue", self];

// Create the serialization queue to use for reading and writing the video data.

    self.rwVideoSerializationQueue = dispatch_queue_create([rwVideoSerializationQueueDescription UTF8String], NULL);

    // Kick off the transcode now that the queues exist.

    [self fk2];

}

- (void)fk2 {

    // self.asset = <#AVAsset that you want to reencode#>;

    self.cancelled = NO;

/********************************/

    // Build a timestamped file name for the output in the temporary directory.

    NSDate *currentDate = [NSDate date];

    NSDateFormatter *dateFormatter = [[NSDateFormatter alloc] init];

    // Use yyyy (calendar year) and HH (24-hour clock); YYYY is the ISO week-based year and hh is the 12-hour clock, both common pitfalls.
    [dateFormatter setDateFormat:@"yyyyMMddHHmmssSS"];

    NSString *dateString = [dateFormatter stringFromDate:currentDate];

    NSString *videoPath = [NSTemporaryDirectory() stringByAppendingPathComponent:[NSString stringWithFormat:@"%@.mp4", dateString]];

    self.path = videoPath;

    // Delete any file already at this path so the recording we write is fresh.

    [[NSFileManager defaultManager] removeItemAtPath:videoPath error:nil];

    NSURL* url = [NSURL fileURLWithPath:videoPath];

    self.outputURL = url;

/********************************/

// Asynchronously load the tracks of the asset you want to read.

    [self.asset loadValuesAsynchronouslyForKeys:@[@"tracks"] completionHandler:^{

// Once the tracks have finished loading, dispatch the work to the main serialization queue.

dispatch_async(self.mainSerializationQueue, ^{

// Due to asynchronous nature, check to see if user has already cancelled.

            if (self.cancelled)

                return;

            BOOL success = YES;

            NSError *localError = nil;

// Check that the asset's tracks loaded successfully.

            success = ([self.asset statusOfValueForKey:@"tracks" error:&localError] == AVKeyValueStatusLoaded);

            if (success)

            {

// If the tracks loaded successfully, make sure that no file exists at the output path for the asset writer.

                NSFileManager *fm = [NSFileManager defaultManager];

                NSString *localOutputPath = [self.outputURL path];

                if ([fm fileExistsAtPath:localOutputPath])

                    success = [fm removeItemAtPath:localOutputPath error:&localError];

            }

            if (success)

                success = [self setupAssetReaderAndAssetWriter:&localError];

            if (success)

                success = [self startAssetReaderAndWriter:&localError];

            if (!success)

                [self readingAndWritingDidFinishSuccessfully:success withError:localError];

        });

    }];

}
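The failure path above calls readingAndWritingDidFinishSuccessfully:withError:, which the post never shows. A minimal sketch of what such a handler typically does, modeled on the corresponding method in Apple's sample — the NSLog bodies are placeholders, not the author's code:

- (void)readingAndWritingDidFinishSuccessfully:(BOOL)success withError:(NSError *)error
{
    if (!success)
    {
        // Tear down both halves of the pipeline on failure.
        [self.assetReader cancelReading];
        [self.assetWriter cancelWriting];
    }

    dispatch_async(dispatch_get_main_queue(), ^{
        // Report the result on the main queue, where UI updates are safe.
        if (success)
            NSLog(@"Re-encode finished, output at %@", self.path);
        else
            NSLog(@"Re-encode failed: %@", error);
    });
}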

- (BOOL)setupAssetReaderAndAssetWriter:(NSError **)outError

{

// Create and initialize the asset reader.

    self.assetReader = [[AVAssetReader alloc] initWithAsset:self.asset error:outError];

    BOOL success = (self.assetReader != nil);

    if (success)

    {

// If the asset reader was successfully initialized, do the same for the asset writer.

        self.assetWriter = [[AVAssetWriter alloc] initWithURL:self.outputURL fileType:AVFileTypeQuickTimeMovie error:outError];

        // Interleave samples so the file is better suited to network playback. Note: the output path above ends in .mp4, but AVFileTypeQuickTimeMovie writes a QuickTime (.mov) container; use AVFileTypeMPEG4 for a genuine MP4 file.
        self.assetWriter.shouldOptimizeForNetworkUse = YES;

        success = (self.assetWriter != nil);

    }

    if (success)

    {

// If the reader and writer were successfully initialized, grab the audio and video asset tracks that will be used.

        AVAssetTrack *assetAudioTrack = nil, *assetVideoTrack = nil;

        NSArray *audioTracks = [self.asset tracksWithMediaType:AVMediaTypeAudio];

        if ([audioTracks count] > 0)

            assetAudioTrack = [audioTracks objectAtIndex:0];

        NSArray *videoTracks = [self.asset tracksWithMediaType:AVMediaTypeVideo];

        if ([videoTracks count] > 0)

            assetVideoTrack = [videoTracks objectAtIndex:0];

        if (assetAudioTrack)

        {

// If there is an audio track to read, set the decompression settings to Linear PCM and create the asset reader output.

            NSDictionary *decompressionAudioSettings = @{ AVFormatIDKey : [NSNumber numberWithUnsignedInt:kAudioFormatLinearPCM] };

            self.assetReaderAudioOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:assetAudioTrack outputSettings:decompressionAudioSettings];

            [self.assetReader addOutput:self.assetReaderAudioOutput];

// Then, set the compression settings to 128kbps AAC and create the asset writer input.

            AudioChannelLayout stereoChannelLayout = {

                .mChannelLayoutTag = kAudioChannelLayoutTag_Stereo,

                .mChannelBitmap = 0,

                .mNumberChannelDescriptions = 0

            };

            NSData *channelLayoutAsData = [NSData dataWithBytes:&stereoChannelLayout length:offsetof(AudioChannelLayout, mChannelDescriptions)];

            NSDictionary *compressionAudioSettings = @{

                                                       AVFormatIDKey         : [NSNumber numberWithUnsignedInt:kAudioFormatMPEG4AAC],

                                                       AVEncoderBitRateKey   : [NSNumber numberWithInteger:128000],

                                                       AVSampleRateKey       : [NSNumber numberWithInteger:44100],

                                                       AVChannelLayoutKey    : channelLayoutAsData,

                                                       AVNumberOfChannelsKey : [NSNumber numberWithUnsignedInteger:2]

                                                       };

            self.assetWriterAudioInput = [AVAssetWriterInput assetWriterInputWithMediaType:[assetAudioTrack mediaType] outputSettings:compressionAudioSettings];

            [self.assetWriter addInput:self.assetWriterAudioInput];

        }

        if (assetVideoTrack)

        {

// If there is a video track to read, set the decompression settings and create the asset reader output. (32BGRA is used here; YUV formats such as kCVPixelFormatType_422YpCbCr8 or kCVPixelFormatType_420YpCbCr8BiPlanarFullRange also work.)

            NSDictionary *decompressionVideoSettings = @{

                                                         (id)kCVPixelBufferPixelFormatTypeKey     : [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA],

                                                         (id)kCVPixelBufferIOSurfacePropertiesKey : [NSDictionary dictionary]

                                                         };

            self.assetReaderVideoOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:assetVideoTrack outputSettings:decompressionVideoSettings];

            [self.assetReader addOutput:self.assetReaderVideoOutput];

            CMFormatDescriptionRef formatDescription = NULL;

// Grab the video format descriptions from the video track and grab the first one if it exists.

            NSArray *videoFormatDescriptions = [assetVideoTrack formatDescriptions];

            if ([videoFormatDescriptions count] > 0)

                formatDescription = (__bridge CMFormatDescriptionRef)[videoFormatDescriptions objectAtIndex:0];

            CGSize trackDimensions = {

                .width = 0.0,

                .height = 0.0,

            };

// If the video track had a format description, grab the track dimensions from there. Otherwise, grab them directly from the track itself.

            if (formatDescription)

                trackDimensions = CMVideoFormatDescriptionGetPresentationDimensions(formatDescription, false, false);

            else

                trackDimensions = [assetVideoTrack naturalSize];

            NSDictionary *compressionSettings = nil;

// If the video track had a format description, attempt to grab the clean aperture settings and pixel aspect ratio used by the video.

            if (formatDescription)

            {

                NSDictionary *cleanAperture = nil;

                NSDictionary *pixelAspectRatio = nil;

                CFDictionaryRef cleanApertureFromCMFormatDescription = CMFormatDescriptionGetExtension(formatDescription, kCMFormatDescriptionExtension_CleanAperture);

                if (cleanApertureFromCMFormatDescription)

                {

                    cleanAperture = @{

                                      AVVideoCleanApertureWidthKey            : (id)CFDictionaryGetValue(cleanApertureFromCMFormatDescription, kCMFormatDescriptionKey_CleanApertureWidth),

                                      AVVideoCleanApertureHeightKey           : (id)CFDictionaryGetValue(cleanApertureFromCMFormatDescription, kCMFormatDescriptionKey_CleanApertureHeight),

                                      AVVideoCleanApertureHorizontalOffsetKey : (id)CFDictionaryGetValue(cleanApertureFromCMFormatDescription, kCMFormatDescriptionKey_CleanApertureHorizontalOffset),

                                      AVVideoCleanApertureVerticalOffsetKey   : (id)CFDictionaryGetValue(cleanApertureFromCMFormatDescription, kCMFormatDescriptionKey_CleanApertureVerticalOffset)

                                      };

                }

                CFDictionaryRef pixelAspectRatioFromCMFormatDescription = CMFormatDescriptionGetExtension(formatDescription, kCMFormatDescriptionExtension_PixelAspectRatio);

                if (pixelAspectRatioFromCMFormatDescription)

                {

                    pixelAspectRatio = @{

                                         AVVideoPixelAspectRatioHorizontalSpacingKey : (id)CFDictionaryGetValue(pixelAspectRatioFromCMFormatDescription, kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing),

                                         AVVideoPixelAspectRatioVerticalSpacingKey   : (id)CFDictionaryGetValue(pixelAspectRatioFromCMFormatDescription, kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing)

                                         };

                }

// Add whichever settings we could grab from the format description to the compression settings dictionary.

                if (cleanAperture || pixelAspectRatio)

                {

                    NSMutableDictionary *mutableCompressionSettings = [NSMutableDictionary dictionary];

                    if (cleanAperture)

                        [mutableCompressionSettings setObject:cleanAperture forKey:AVVideoCleanApertureKey];

                    if (pixelAspectRatio)

                        [mutableCompressionSettings setObject:pixelAspectRatio forKey:AVVideoPixelAspectRatioKey];

                    compressionSettings = mutableCompressionSettings;

                }

            }

// Create the video settings dictionary for H.264. (The literal must be copied into a genuinely mutable dictionary; casting an NSDictionary literal to NSMutableDictionary would crash on the setObject:forKey: below.)

            NSMutableDictionary *videoSettings = [NSMutableDictionary dictionaryWithDictionary:@{

                                                                           AVVideoCodecKey  : AVVideoCodecH264,

                                                                           AVVideoWidthKey  : [NSNumber numberWithDouble:trackDimensions.width],

                                                                           AVVideoHeightKey : [NSNumber numberWithDouble:trackDimensions.height]

                                                                           }];

// Put the compression settings into the video settings dictionary if we were able to grab them.

            if (compressionSettings)

                [videoSettings setObject:compressionSettings forKey:AVVideoCompressionPropertiesKey];

// Create the asset writer input and add it to the asset writer.

            self.assetWriterVideoInput = [AVAssetWriterInput assetWriterInputWithMediaType:[assetVideoTrack mediaType] outputSettings:videoSettings];

            [self.assetWriter addInput:self.assetWriterVideoInput];

        }

    }

    return success;

}

- (BOOL)startAssetReaderAndWriter:(NSError **)outError

{

    BOOL success = YES;

// Attempt to start the asset reader.

    success = [self.assetReader startReading];

    if (!success)

        *outError = [self.assetReader error];

    if (success)

    {

// If the reader started successfully, attempt to start the asset writer.

        success = [self.assetWriter startWriting];

        if (!success)

            *outError = [self.assetWriter error];

    }

    if (success)

    {

// If the asset reader and writer both started successfully, create the dispatch group where the reencoding will take place and start a sample-writing session.

        self.dispatchGroup = dispatch_group_create();

        [self.assetWriter startSessionAtSourceTime:kCMTimeZero];

        self.audioFinished = NO;

        self.videoFinished = NO;

        if (self.assetWriterAudioInput)

        {

// If there is audio to reencode, enter the dispatch group before beginning the work.

            dispatch_group_enter(self.dispatchGroup);

// Specify the block to execute when the asset writer is ready for audio media data, and specify the queue to call it on.

            [self.assetWriterAudioInput requestMediaDataWhenReadyOnQueue:self.rwAudioSerializationQueue usingBlock:^{

// Because the block is called asynchronously, check to see whether its task is complete.

                if (self.audioFinished)

                    return;

                BOOL completedOrFailed = NO;

// If the task isn't complete yet, make sure that the input is actually ready for more media data.

                while ([self.assetWriterAudioInput isReadyForMoreMediaData] && !completedOrFailed)

                {

// Get the next audio sample buffer, and append it to the output file.

                    CMSampleBufferRef sampleBuffer = [self.assetReaderAudioOutput copyNextSampleBuffer];

                    if (sampleBuffer != NULL)

                    {

                        BOOL success = [self.assetWriterAudioInput appendSampleBuffer:sampleBuffer];

                        CFRelease(sampleBuffer);

                        sampleBuffer = NULL;

                        completedOrFailed = !success;

                    }

                    else

                    {

                        completedOrFailed = YES;

                    }

                }

                if (completedOrFailed)

                {

// Mark the input as finished, but only if we haven't already done so, and then leave the dispatch group (since the audio work has finished).

                    BOOL oldFinished = self.audioFinished;

                    self.audioFinished = YES;

                    if (oldFinished == NO)

                    {

                        [self.assetWriterAudioInput markAsFinished];

                    }

                    dispatch_group_leave(self.dispatchGroup);

                }

            }];

        }

        if (self.assetWriterVideoInput)

        {

// If there is video to reencode, enter the dispatch group before beginning the work.

            dispatch_group_enter(self.dispatchGroup);

// Specify the block to execute when the asset writer is ready for video media data, and specify the queue to call it on.

            [self.assetWriterVideoInput requestMediaDataWhenReadyOnQueue:self.rwVideoSerializationQueue usingBlock:^{
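                // NOTE: the original post is truncated at this point, mid-listing. Everything
                // from here to the end of the method is a reconstruction that mirrors the
                // audio branch above (and the Apple reader/writer sample this code follows);
                // it is not the author's original text.

                // Because the block is called asynchronously, check to see whether its task is complete.
                if (self.videoFinished)
                    return;

                BOOL completedOrFailed = NO;

                // If the task isn't complete yet, make sure that the input is actually ready for more media data.
                while ([self.assetWriterVideoInput isReadyForMoreMediaData] && !completedOrFailed)
                {
                    // Get the next video sample buffer, and append it to the output file.
                    CMSampleBufferRef sampleBuffer = [self.assetReaderVideoOutput copyNextSampleBuffer];
                    if (sampleBuffer != NULL)
                    {
                        BOOL success = [self.assetWriterVideoInput appendSampleBuffer:sampleBuffer];
                        CFRelease(sampleBuffer);
                        sampleBuffer = NULL;
                        completedOrFailed = !success;
                    }
                    else
                    {
                        completedOrFailed = YES;
                    }
                }

                if (completedOrFailed)
                {
                    // Mark the input as finished, but only once, then leave the dispatch group.
                    BOOL oldFinished = self.videoFinished;
                    self.videoFinished = YES;
                    if (oldFinished == NO)
                    {
                        [self.assetWriterVideoInput markAsFinished];
                    }
                    dispatch_group_leave(self.dispatchGroup);
                }
            }];
        }

        // When both the audio and video work have left the group, finish up on the main serialization queue.
        dispatch_group_notify(self.dispatchGroup, self.mainSerializationQueue, ^{
            BOOL finalSuccess = YES;
            NSError *finalError = nil;

            if (self.cancelled)
            {
                // Cancellation: tear down the reader and writer.
                [self.assetReader cancelReading];
                [self.assetWriter cancelWriting];
            }
            else
            {
                // Otherwise check the reader for failure, then close out the writer.
                if ([self.assetReader status] == AVAssetReaderStatusFailed)
                {
                    finalSuccess = NO;
                    finalError = [self.assetReader error];
                }
                if (finalSuccess)
                {
                    // (finishWriting matches the era of this listing; modern code should
                    // prefer finishWritingWithCompletionHandler:.)
                    finalSuccess = [self.assetWriter finishWriting];
                    if (!finalSuccess)
                        finalError = [self.assetWriter error];
                }
            }

            // Hand the result to the completion handler sketched earlier.
            [self readingAndWritingDidFinishSuccessfully:finalSuccess withError:finalError];
        });
    }

    return success;
}

With the missing pieces filled in, a caller only needs to create the object, set its asset, and call fk; the result arrives asynchronously via readingAndWritingDidFinishSuccessfully:withError:.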
