6.基於FFMPEG+SDL2播放音訊
參考資料:
1.雷博部落格
2. An ffmpeg and SDL Tutorial
前面瞭解了FFMPEG解碼MP3檔案為PCM,以及將PCM通過SDL2進行播放,下面就是將這兩者進行結合,使之能夠邊解碼邊播放。
一、綜述
總共有2份程式碼,第一份是參考雷博的程式碼,第二份是在雷博的程式碼基礎上進行修改,增加了連結串列佇列控制。
二、程式碼1(基礎程式碼)
關於FFMPEG解碼PCM以及SDL播放音訊相關知識在前兩篇文章中已經詳細描述了,所以不再在這裡贅述,參考雷博的程式碼,將前面兩篇文章中的程式碼進行綜合,即可正常解碼並播放音訊。
大致流程為:
初始化複用器和解複用器→獲取輸入檔案的一些資訊→查詢解碼器並開啟→初始化SDL→播放音訊→讀出音訊資料並解碼→等待SDL讀取音訊→SDL回撥讀取音訊資料→結束。
其中,比較重要的便是回撥函式,當裝置需要音訊資料時候便會呼叫此回撥函式獲取音訊資料,因此在這個函式中,我們需要將解碼後的音訊資料賦值給stream。
程式碼如下:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
#include "SDL2/SDL.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <SDL2/SDL.h>
#ifdef __cplusplus
};
#endif
#endif
//#define debug_msg(fmt, args...) printf("--->[%s,%d] " fmt "\n\n", __FUNCTION__, __LINE__, ##args)
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
//48000 * (32/8)
// Shared state between the decode loop and the SDL audio callback:
unsigned int audioLen = 0;        // bytes of decoded PCM still pending in audioChunk
unsigned char *audioChunk = NULL; // start of the current decoded PCM buffer
unsigned char *audioPos = NULL;   // read cursor inside audioChunk (advanced by the callback)
/* SDL audio callback: copy up to `len` bytes of pending decoded PCM into
 * `stream`.  Reads/advances the globals audioPos/audioLen; outputs silence
 * when no data is pending.  `udata` is unused. */
void fill_audio(void * udata, Uint8 * stream, int len)
{
    SDL_memset(stream, 0, len);
    if (audioLen == 0)
        return;
    /* Never copy more than is pending. */
    if ((unsigned int)len > audioLen)
        len = audioLen;
    SDL_MixAudio(stream, audioPos, len, SDL_MIX_MAXVOLUME);
    audioPos += len;
    audioLen -= len;
}
/*
 * Decode "skycity1.mp3" with FFmpeg and play it through SDL2.
 *
 * Flow: open and inspect the file -> find the first audio stream -> open the
 * decoder -> open the SDL audio device (fill_audio callback) -> read packets,
 * decode, resample each frame to stereo S16 44.1 kHz, and hand the PCM to the
 * callback through the audioChunk/audioPos/audioLen globals.
 *
 * Returns 0 on success, -1 on any failure.
 */
int test_audio_2_play()
{
    AVFormatContext *pFortCtx = NULL;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVPacket *pPkt = NULL;
    AVFrame *pFrame = NULL;
    SDL_AudioSpec wantSpec;
    char inFile[] = "skycity1.mp3";
    int ret = -1;
    int audioIndex = -1;
    int i = 0;
    int got_picture = -1;
    /* Desired output: stereo, signed 16-bit, 44.1 kHz. */
    uint64_t out_chn_layout = AV_CH_LAYOUT_STEREO;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_sample_rate = 44100;
    int out_nb_samples = -1;
    int out_channels = -1;
    int out_buffer_size = -1;
    unsigned char *outBuff = NULL;
    uint64_t in_chn_layout;
    struct SwrContext *au_convert_ctx = NULL;

    av_register_all();
    pFortCtx = avformat_alloc_context();
    if (avformat_open_input(&pFortCtx, inFile, NULL, NULL) != 0) {
        printf("avformat_open_input error!\n");
        goto ERR_1;
    }
    if (avformat_find_stream_info(pFortCtx, NULL) < 0) {
        printf("avformat_find_stream_info error!\n");
        goto ERR_1;
    }
    /* Find the first audio stream. */
    for (i = 0; i < pFortCtx->nb_streams; i++) {
        if (pFortCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioIndex = i;
            break;
        }
    }
    if (-1 == audioIndex) {
        printf("can not find audio index!\n");
        goto ERR_1;
    }
    printf("------>audioIndex is %d\n", audioIndex);

    pCodecCtx = pFortCtx->streams[audioIndex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (NULL == pCodec) {
        printf("can not find decoder!\n");
        goto ERR_1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        goto ERR_1;
    }
    if (NULL == (pPkt = (AVPacket *)av_malloc(sizeof(AVPacket)))) {
        printf("AV malloc failure.\n");
        goto ERR_2;
    }

    /* Output parameters. */
    out_nb_samples = pCodecCtx->frame_size;
    out_channels = av_get_channel_layout_nb_channels(out_chn_layout);
    out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
    outBuff = (unsigned char *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2); /* stereo PCM scratch buffer */
    printf("-------->out_buffer_size is %d\n", out_buffer_size);
    in_chn_layout = av_get_default_channel_layout(pCodecCtx->channels);
    pFrame = av_frame_alloc();

    /* SDL audio device; fill_audio drains audioChunk/audioPos/audioLen. */
    wantSpec.freq = out_sample_rate;
    wantSpec.format = AUDIO_S16SYS;
    wantSpec.channels = out_channels;
    wantSpec.silence = 0;
    wantSpec.samples = out_nb_samples;
    wantSpec.callback = fill_audio;
    wantSpec.userdata = pCodecCtx;
    if (SDL_OpenAudio(&wantSpec, NULL) < 0) {
        printf("can not open SDL!\n");
        goto ERR_3;
    }

    /* Resampler: decoder's native format -> stereo S16 44.1 kHz. */
    au_convert_ctx = swr_alloc_set_opts(NULL,
            out_chn_layout, out_sample_fmt, out_sample_rate,              /* out */
            in_chn_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, /* in */
            0, NULL);
    swr_init(au_convert_ctx);

    SDL_PauseAudio(0); /* start playback (SDL begins invoking the callback) */

    ret = 0; /* BUGFIX: success previously fell through still returning -1 */
    while (av_read_frame(pFortCtx, pPkt) >= 0) {
        if (pPkt->stream_index == audioIndex) {
            if (avcodec_decode_audio4(pCodecCtx, pFrame, &got_picture, pPkt) < 0) {
                printf("Error in decoding audio frame.\n");
                ret = -1;
                break;
            }
            if (got_picture > 0) {
                /* BUGFIX: swr_convert's out-capacity argument is in samples
                 * per channel.  outBuff holds MAX_AUDIO_FRAME_SIZE*2 bytes =
                 * (bytes / (2 ch * 2 bytes/sample)) = MAX_AUDIO_FRAME_SIZE/2
                 * samples; passing MAX_AUDIO_FRAME_SIZE overstated it 2x. */
                swr_convert(au_convert_ctx, &outBuff, MAX_AUDIO_FRAME_SIZE / 2,
                            (const uint8_t **)pFrame->data, pFrame->nb_samples);
                /* Busy-wait until the callback has consumed the previous chunk.
                 * NOTE(review): audioLen is written here and read in the
                 * callback without locking; SDL_LockAudio() around the update
                 * would be safer — confirm before shipping. */
                while (audioLen > 0)
                    SDL_Delay(1);
                audioChunk = (unsigned char *)outBuff;
                audioPos = audioChunk;
                audioLen = out_buffer_size;
            }
        }
        av_free_packet(pPkt);
    }

    SDL_CloseAudio();
    SDL_Quit();
    swr_free(&au_convert_ctx);
ERR_3:
    av_frame_free(&pFrame); /* BUGFIX: frame was leaked */
    av_free(outBuff);
    av_free(pPkt);          /* BUGFIX: packet holder was leaked */
ERR_2:
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFortCtx); /* frees the context and NULLs pFortCtx */
ERR_1:
    avformat_free_context(pFortCtx); /* no-op when pFortCtx is already NULL */
    return ret;
}
/* Entry point: decode the MP3 and play it through SDL2. */
int main(int argc, char *argv[])
{
    (void)argc; /* command-line arguments are not used */
    (void)argv;
    test_audio_2_play();
    return 0;
}
三、程式碼2(增加連結串列佇列)
在這份程式碼中,主要是增加了連結串列佇列,並將解碼工作放在了回撥函式中。
和上面的區別是:
主程式中,每讀取一包的資料(av_read_frame)將其放入連結串列中
在回撥函式中,從連結串列中取出每包的資料並將其解碼放入stream中
1)連結串列結構
連結串列結構如下:
// Thread-safe FIFO of demuxed packets, shared between the demuxing thread
// (producer) and the SDL audio callback (consumer).
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt; // head and tail of the singly linked list
int nb_packets; // number of packets currently queued
int size;       // total payload bytes across all queued packets
SDL_mutex *mutex; // guards every field of this struct
SDL_cond *cond;   // signaled when a packet is appended
} PacketQueue;
其中,first_pkt和last_pkt為兩個主要的連結串列結點,first_pkt用於指向第一個連結串列結點,當我們取資料的時候,總是取出第一個結點的資料,並將第一個結點的next重新賦值給first_pkt,用於下次取資料時使用;last_pkt為最後一個結點,當放資料時候,會將新的資料結點地址賦值給last_pkt的next,並將last_pkt重新指向最後一個結點,依此反覆,不斷將新的資料結點新增到連結串列的最後。
其原型為:
// Quoted from FFmpeg's own headers: one linked-list node holding a packet.
typedef struct AVPacketList {
AVPacket pkt;               // the queued packet (payload owned by the node)
struct AVPacketList *next;  // next node, or NULL at the tail
} AVPacketList;
nb_packets為當前的總packet數目
size 為當前所有packet中資料的大小
mutex 為互斥鎖
cond 為條件變數
因為主執行緒會不斷向連結串列中放入讀取到的資料以及回撥函式中會不斷從連結串列中讀取資料,所以需要有互斥鎖以及條件變數來進行同步操作。
2)放資料到佇列中
主函式中將讀取到的資料放到佇列中,函式原型如下:
/*
 * Append a copy of *pkt to the queue.  The packet's payload is duplicated
 * (av_dup_packet) so the queue owns it independently of the caller.
 * Returns 0 on success, -1 on allocation/duplication failure.
 */
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0) {
return -1;
}
pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt) //queue is empty
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1; //link the new node after the current tail
q->last_pkt = pkt1; //the new node becomes the tail
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond); //wake a consumer blocked in packet_queue_get
SDL_UnlockMutex(q->mutex);
return 0;
}
注意,在進行連結串列操作的時候,需要加鎖解鎖,防止其他地方同時進行操作。
3)從佇列中取資料
從連結串列佇列中取出結點資料,如下:
/*
 * Pop the head packet into *pkt.  If `block` is non-zero, wait on the
 * condition variable until a packet arrives (or the global `quit` flag is
 * set).  Returns 1 when a packet was dequeued, 0 when empty and non-blocking,
 * -1 when quitting.
 */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for(;;)
{
if(quit)
{
ret = -1;
break;
}
pkt1 = q->first_pkt; //take the head node
if (pkt1)
{
q->first_pkt = pkt1->next; //advance the head past the node being removed
if (!q->first_pkt)
q->last_pkt = NULL; //queue is now empty: tail must be cleared too
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt; //ownership of the payload transfers to the caller
av_free(pkt1);
ret = 1;
break;
}
else if (!block)
{
ret = 0;
break;
}
else
{
SDL_CondWait(q->cond, q->mutex); //sleep until a producer signals
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
4)回撥函式
/*
 * SDL audio callback: fill `stream` with exactly `len` bytes of decoded PCM.
 * Decodes on demand via audio_decode_frame(); static locals carry leftover
 * decoded bytes across invocations, because one decoded frame is usually
 * larger than one callback request.
 */
void audio_callback(void *userdata, Uint8 *stream, int len) {
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;
static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; // decoded PCM
static unsigned int audio_buf_size = 0;  // valid bytes in audio_buf
static unsigned int audio_buf_index = 0; // read cursor into audio_buf
while(len > 0)
{
if(audio_buf_index >= audio_buf_size) //audio_buf exhausted: decode more data
{
/* We have already sent all our data; get more */
audio_size = audio_decode_frame(aCodecCtx, audio_buf,sizeof(audio_buf));
if(audio_size < 0)
{
/* If error, output silence */
audio_buf_size = 1024;
memset(audio_buf, 0, audio_buf_size);
}
else
{
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index; //bytes still available this pass
if(len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}
在上面程式碼中有三個static 變數:
audio_buf 、audio_buf_size、 audio_buf_index
audio_buf中存放的為解碼後的資料
audio_buf_size 為當前audio_buf中資料的長度
audio_buf_index為當前指向audio_buf的index
之所以設定為static,是因為當呼叫此回撥函式時候,SDL期望的資料長度為len,但是可能我們呼叫audio_decode_frame返回的資料總長度大於len,所以將長度為len的資料賦值給stream,當下一次回撥進來的時候,先根據audio_buf_size和audio_buf_index的大小來進行判斷是否需要再解碼新的資料,如果還有剩餘資料,則繼續將audio_buf中的剩餘資料賦值給stream,此時如果剩餘資料長度小於len,則迴圈解碼新的資料進行賦值。
5)解碼資料
/*
 * Dequeue packets from the global `audioq`, decode with `aCodecCtx`, and
 * resample one frame of PCM into `audio_buf` via the global au_convert_ctx.
 * Returns the byte count written (the global out_buffer_size) or -1 on
 * quit/queue failure.  Static locals carry the partially consumed packet
 * across calls: one AVPacket may contain several audio frames.
 * NOTE(review): `buf_size` is accepted but never checked against the bytes
 * swr_convert may write — verify capacity handling.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
static AVFrame frame;
int len1, data_size = 0;
for(;;)
{
while(audio_pkt_size > 0)
{
int got_frame = 0;
len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
if(len1 < 0)
{
/* if error, skip frame */
audio_pkt_size = 0;
break;
}
/* NOTE(review): audio_pkt_data is advanced but never written back into
 * pkt.data, so for packets holding multiple frames the decoder is re-fed
 * the same bytes — TODO confirm against the ffplay tutorial, which sets
 * pkt.data/pkt.size from these before each decode call. */
audio_pkt_data += len1;
audio_pkt_size -= len1;
data_size = 0;
if(got_frame)
{
/* NOTE(review): swr_convert's third argument is capacity in samples per
 * channel, not bytes; MAX_AUDIO_FRAME_SIZE likely overstates audio_buf's
 * capacity — verify against the buffer size chosen by the caller. */
swr_convert(au_convert_ctx,&audio_buf, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame.data , frame.nb_samples);
data_size = out_buffer_size;
/*data_size = av_samples_get_buffer_size(NULL,
aCodecCtx->channels,
frame.nb_samples,
aCodecCtx->sample_fmt,
1);
assert(data_size <= buf_size);
memcpy(audio_buf, frame.data[0], data_size);
*/
}
if(data_size <= 0)
{
/* No data yet, get more frames */
continue;
}
/* We have data, return it and come back for more later */
return data_size;
}
if(pkt.data)
av_free_packet(&pkt); //release the fully consumed packet's payload
if(quit) {
return -1;
}
if(packet_queue_get(&audioq, &pkt, 1) < 0) //blocking read from the queue
{
return -1;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
在這個函式中,仍然有幾個static變數,pkt/audio_pkt_data/audio_pkt_size/frame.
首先我們先了解一下解碼函式avcodec_decode_audio4中的一些描述:
- Some decoders may support multiple frames in a single AVPacket. Such
- decoders would then just decode the first frame and the return value would be
- less than the packet size. In this case, avcodec_decode_audio4 has to be
- called again with an AVPacket containing the remaining data in order to
- decode the second frame, etc… Even if no frames are returned, the packet
- needs to be fed to the decoder with remaining data until it is completely
- consumed or an error occurs.
上面大意是,一個AVPacket中可能會有多幀音訊資料,但是avcodec_decode_audio4每次只解碼一幀的資料,因此需要我們多次呼叫此介面來解碼AVPacket中的資料。
根據以上,所以當audio_decode_frame解碼一幀後就返回,當下次進來此函式時候,會繼續將pkt裡面的剩餘資料進行解碼,當將pkt裡面的剩餘資料解碼完之後,在下次進來的時候會重新從佇列裡面讀取資料,audio_pkt_size和audio_pkt_data則是與此過程相關。
同時,解碼後的資料格式可能不是我們想要的或者SDL2無法播放,則需要呼叫SwrContext進行重取樣,如下:
swr_convert(au_convert_ctx,&audio_buf, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame.data , frame.nb_samples);
以上便是幾個重要的函式解析,下面是完整的程式碼:
//採用佇列的方式
//linux下編譯命令如下:
//gcc test_2_pcm.c -o test -I /usr/local/include -L /usr/local/lib -lavformat -lavcodec -lavutil -lswresample
//windows下可以直接套用雷博的工程
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
#include "SDL2/SDL.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <SDL2/SDL.h>
#ifdef __cplusplus
};
#endif
#endif
//#define debug_msg(fmt, args...) printf("--->[%s,%d] " fmt "\n\n", __FUNCTION__, __LINE__, ##args)
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
//48000 * (32/8)
#define SDL_AUDIO_BUFFER_SIZE 1024
// Globals shared with the audio callback / decoder helper:
struct SwrContext *au_convert_ctx; // resampler, initialized in test_audio_2_play()
int out_buffer_size = -1; // bytes per resampled output frame
// Thread-safe FIFO of demuxed packets, shared between the demuxing thread
// (producer) and the SDL audio callback (consumer).
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt; // head and tail of the singly linked list
int nb_packets; // number of packets currently queued
int size;       // total payload bytes across all queued packets
SDL_mutex *mutex; // guards every field of this struct
SDL_cond *cond;   // signaled when a packet is appended
} PacketQueue;
PacketQueue audioq; // the one queue feeding the audio callback
/* Prepare an empty queue: zero all counters/pointers and create the
 * mutex and condition variable used for producer/consumer handoff. */
void packet_queue_init(PacketQueue *q) {
    memset(q, 0, sizeof *q);
    q->first_pkt = q->last_pkt = NULL;
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}
/*
 * Append a copy of *pkt to the queue.  The payload is duplicated first so
 * the queue owns it independently of the caller's packet.
 * Returns 0 on success, -1 on duplication/allocation failure.
 */
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    AVPacketList *node;

    /* Make the packet's payload independently owned before queuing it. */
    if (av_dup_packet(pkt) < 0) {
        return -1;
    }
    node = (AVPacketList *)av_malloc(sizeof(AVPacketList));
    if (node == NULL) {
        return -1;
    }
    node->pkt = *pkt;
    node->next = NULL;

    SDL_LockMutex(q->mutex);
    if (q->last_pkt == NULL) {
        q->first_pkt = node;      /* queue was empty: node is the new head */
    } else {
        q->last_pkt->next = node; /* link after the current tail */
    }
    q->last_pkt = node;           /* node is the new tail */
    q->nb_packets++;
    q->size += node->pkt.size;
    SDL_CondSignal(q->cond);      /* wake a consumer blocked in packet_queue_get */
    SDL_UnlockMutex(q->mutex);
    return 0;
}
int quit = 0; // set non-zero to make blocking queue reads and the decoder bail out
/*
 * Pop the head packet into *pkt.  With `block` non-zero, sleep on the
 * condition variable until a packet arrives or the global `quit` flag is
 * raised.  Returns 1 when a packet was dequeued, 0 when empty and
 * non-blocking, -1 when quitting.
 */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
    int ret = 0;

    SDL_LockMutex(q->mutex);
    while (1) {
        AVPacketList *head;

        if (quit) {
            ret = -1;
            break;
        }
        head = q->first_pkt;
        if (head != NULL) {
            /* Unlink the head node and hand its payload to the caller. */
            q->first_pkt = head->next;
            if (q->first_pkt == NULL)
                q->last_pkt = NULL;   /* queue emptied: clear the tail too */
            q->nb_packets--;
            q->size -= head->pkt.size;
            *pkt = head->pkt;
            av_free(head);
            ret = 1;
            break;
        }
        if (!block) {
            ret = 0;  /* empty and caller doesn't want to wait */
            break;
        }
        SDL_CondWait(q->cond, q->mutex); /* sleep until a producer signals */
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
/*
 * Dequeue packets from the global `audioq`, decode with `aCodecCtx`, and
 * resample one frame of PCM into `audio_buf` via the global au_convert_ctx.
 * Returns the byte count written (the global out_buffer_size) or -1 on
 * quit/queue failure.  Static locals carry the partially consumed packet
 * across calls: one AVPacket may contain several audio frames.
 * NOTE(review): `buf_size` is accepted but never checked against the bytes
 * swr_convert may write — verify capacity handling.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
static AVFrame frame;
int len1, data_size = 0;
for(;;)
{
while(audio_pkt_size > 0)
{
int got_frame = 0;
len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
if(len1 < 0)
{
/* if error, skip frame */
audio_pkt_size = 0;
break;
}
/* NOTE(review): audio_pkt_data is advanced but never written back into
 * pkt.data, so for packets holding multiple frames the decoder is re-fed
 * the same bytes — TODO confirm against the ffplay tutorial, which sets
 * pkt.data/pkt.size from these before each decode call. */
audio_pkt_data += len1;
audio_pkt_size -= len1;
data_size = 0;
if(got_frame)
{
/* NOTE(review): swr_convert's third argument is capacity in samples per
 * channel, not bytes; MAX_AUDIO_FRAME_SIZE likely overstates audio_buf's
 * real capacity of buf_size bytes — verify. */
swr_convert(au_convert_ctx,&audio_buf, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame.data , frame.nb_samples);
data_size = out_buffer_size;
/*data_size = av_samples_get_buffer_size(NULL,
aCodecCtx->channels,
frame.nb_samples,
aCodecCtx->sample_fmt,
1);
assert(data_size <= buf_size);
memcpy(audio_buf, frame.data[0], data_size);
*/
}
if(data_size <= 0)
{
/* No data yet, get more frames */
continue;
}
/* We have data, return it and come back for more later */
return data_size;
}
if(pkt.data)
av_free_packet(&pkt); //release the fully consumed packet's payload
if(quit) {
return -1;
}
if(packet_queue_get(&audioq, &pkt, 1) < 0) //blocking read from the queue
{
return -1;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
/*
 * SDL audio callback: fill `stream` with exactly `len` bytes of decoded PCM.
 * Decodes on demand via audio_decode_frame(); static locals carry leftover
 * decoded bytes across invocations, because one decoded frame is usually
 * larger than a single callback request.
 */
void audio_callback(void *userdata, Uint8 *stream, int len) {
    AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; /* decoded PCM */
    static unsigned int audio_buf_size = 0;  /* valid bytes in audio_buf */
    static unsigned int audio_buf_index = 0; /* read cursor into audio_buf */

    while (len > 0) {
        int chunk;

        if (audio_buf_index >= audio_buf_size) {
            /* Buffer drained — decode the next frame. */
            int decoded = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
            if (decoded < 0) {
                /* Decode error: emit a short run of silence instead. */
                audio_buf_size = 1024;
                memset(audio_buf, 0, audio_buf_size);
            } else {
                audio_buf_size = decoded;
            }
            audio_buf_index = 0;
        }

        chunk = audio_buf_size - audio_buf_index;
        if (chunk > len)
            chunk = len;
        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, chunk);
        len -= chunk;
        stream += chunk;
        audio_buf_index += chunk;
    }
}
int test_audio_2_play()
{
AVFormatContext *pFortCtx = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVPacket *pPkt = NULL;
AVFrame*pFrame = NULL;
struct SwrContext *pSwrCtx = NULL;
SDL_AudioSpec wantSpec;
//FILE* outFile = fopen("output.pcm", "wb");
char inFile[] = "skycity1.mp3";
int ret = -1;
int audioIndex = -1;
int i = 0;
int got_picture = -1;
uint64_t out_chn_layout = AV_CH_LAYOUT_STEREO; //通道佈局 輸出雙聲道
enum AVSampleFormat out_sample_fmt=AV_SAMPLE_FMT_S16; //聲音格式
int out_sample_rate=44100; //取樣率
int out_nb_samples = -1;
int out_channels = -1; //通道數
unsigned char *outBuff = NULL;
uint64_t in_chn_layout = -1; //通道佈局
av_register_all();
pFortCtx = avformat_alloc_context();
if (avformat_open_input(&pFortCtx, inFile, NULL, NULL) != 0) //open input file and read data into buf
{
printf("avformat_open_input error!\n");
ret = -1;
goto ERR_1;
}
if (avformat_find_stream_info(pFortCtx, NULL) < 0) //find stream some info
{
printf("avformat_find_stream_info error!\n");
ret = -1;
goto ERR_1;
}
/* find audio index */
for (i = 0; i < pFortCtx->nb_streams; i++)
{
if (pFortCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioIndex = i;
break;
}
}
if (-1 == audioIndex)
{
printf("can not find audio index!\n");
ret = -1;
goto ERR_1;
}
printf("------>audioIndex is %d\n", audioIndex);
pCodecCtx = pFortCtx->streams[audioIndex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (NULL == pCodec)
{
printf("can not find decoder!\n");
ret = -1;
goto ERR_1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("Could not open codec.\n");
ret = -1;
goto ERR_1;
}
if (NULL == (pPkt = (AVPacket *)av_malloc(sizeof(AVPacket))))
{
printf("AV malloc failure.\n");
ret = -1;
goto ERR_2;
}
//out parameter
out_nb_samples = pCodecCtx->frame_size;
out_channels = av_get_channel_layout_nb_channels(out_chn_layout);
out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
outBuff = (unsigned char *)av_malloc(MAX_AUDIO_FRAME_SIZE*2); //雙聲道
printf("-------->out_buffer_size is %d\n",out_buffer_size);
in_chn_layout = av_get_default_channel_layout(pCodecCtx->channels);
pFrame = av_frame_alloc();
//SDL
wantSpec.freq = out_sample_rate;
wantSpec.format = AUDIO_S16SYS;
wantSpec.channels = out_channels;
wantSpec.silence = 0;
wantSpec.samples = out_nb_samples;
wantSpec.callback = audio_callback;
wantSpec.userdata = pCodecCtx;
if (SDL_OpenAudio(&wantSpec, NULL) < 0)
{
printf("can not open SDL!\n");
ret = -1;
goto ERR_3;
}
//Swr
au_convert_ctx=swr_alloc_set_opts(NULL,
out_chn_layout, /*out*/
out_sample_fmt, /*out*/
out_sample_rate, /*out*/
in_chn_layout, /*in*/
pCodecCtx->sample_fmt , /*in*/
pCodecCtx->sample_rate, /*in*/
0,
NULL);
swr_init(au_convert_ctx);
SDL_PauseAudio(0);
while(av_read_frame(pFortCtx, pPkt) >= 0)
{
if (pPkt->stream_index == audioIndex)
{
#if 0
if (avcodec_decode_audio4(pCodecCtx, pFrame, &got_picture, pPkt) < 0)
{
printf("Error in decoding audio frame.\n");
ret = -1;
break;
}
if (got_picture > 0)
{
swr_convert(au_convert_ctx,&outBuff, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)pFrame->data , pFrame->nb_samples);
//fwrite(outBuff, 1, out_buffer_size, outFile);
while(audioLen > 0)
SDL_Delay(1);
audioChunk = (unsigned char *)outBuff;
audioPos = audioChunk;
audioLen = out_buffer_size;
}
#else
packet_queue_put(&audioq, pPkt);
#endif
}
else
av_free_packet(pPkt);
}
sleep(10);
SDL_CloseAudio();
SDL_Quit();
swr_free(&au_convert_ctx);
ERR_3:
av_free(outBuff);
ERR_2:
avcodec_close(pCodecCtx);
avformat_close_input(&pFortCtx);
ERR_1:
avformat_free_context(pFortCtx);
//fclose(outFile);
return ret;
}
/* Entry point: set up the shared packet queue, then decode and play. */
int main(int argc, char *argv[])
{
    (void)argc; /* command-line arguments are not used */
    (void)argv;
    packet_queue_init(&audioq);
    test_audio_2_play();
    return 0;
}
最後是一個完整的工程,「無恥」地直接沿用了雷博的工程配置,在 Visual Studio 2010 上跑通了,直接將程式碼寫入工程即可編譯執行。
基於FFMPEG+SDL2播放音訊