The Simplest FFmpeg AVDevice Example (Reading the Camera): An Annotated Walkthrough

This article reposts the code from the original post "Simplest FFmpeg Device (Read Camera)".

On top of it, it walks through the program flow; please read the original post first.

=============================

/**
 * Simplest FFmpeg Device (Read Camera)
 *
 * 雷霄驊 Lei Xiaohua
 * [email protected]
 * Communication University of China / Digital TV Technology
 * http://blog.csdn.net/leixiaohua1020
 *
 * This program reads data from the computer's camera, decodes it and
 * displays it. It is the simplest example of the usage of FFmpeg's
 * libavdevice library and is suitable for FFmpeg beginners.
 * It supports 2 ways of reading the camera on Microsoft Windows:
 *  1.vfwcap: VfW (Video for Windows) capture input device.
 *            The filename passed as input is the capture driver number,
 *            ranging from 0 to 9.
 *  2.dshow: Uses DirectShow. The camera on the author's machine is named
 *           "Integrated Camera"; replace it with the name of the camera
 *           on your own computer.
 * It uses video4linux2 to read the camera on Linux.
 * It uses avfoundation to read the camera on macOS.
 *
 */


#include <stdio.h>

#define __STDC_CONSTANT_MACROS

#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "SDL/SDL.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <SDL/SDL.h>
#ifdef __cplusplus
};
#endif
#endif

//Output YUV420P
#define OUTPUT_YUV420P 0
//'1' Use Dshow
//'0' Use VFW
#define USE_DSHOW 0


//Refresh Event
#define SFM_REFRESH_EVENT  (SDL_USEREVENT + 1)

#define SFM_BREAK_EVENT  (SDL_USEREVENT + 2)

int thread_exit=0;

//Push a refresh event every 40 ms (~25 fps) to drive the
//decode/display work in the main event loop
int sfp_refresh_thread(void *opaque)
{
	thread_exit=0;
	while (!thread_exit) {
		SDL_Event event;
		event.type = SFM_REFRESH_EVENT;
		SDL_PushEvent(&event);
		SDL_Delay(40);
	}
	}
	thread_exit=0;
	//Break
	SDL_Event event;
	event.type = SFM_BREAK_EVENT;
	SDL_PushEvent(&event);

	return 0;
}


//Show Dshow Device
void show_dshow_device(){
	AVFormatContext *pFormatCtx = avformat_alloc_context();
	AVDictionary* options = NULL;
	av_dict_set(&options,"list_devices","true",0);
	AVInputFormat *iformat = av_find_input_format("dshow");
	printf("========Device Info=============\n");
	avformat_open_input(&pFormatCtx,"video=dummy",iformat,&options);
	printf("================================\n");
}

//Show Dshow Device Option
void show_dshow_device_option(){
	AVFormatContext *pFormatCtx = avformat_alloc_context();
	AVDictionary* options = NULL;
	av_dict_set(&options,"list_options","true",0);
	AVInputFormat *iformat = av_find_input_format("dshow");
	printf("========Device Option Info======\n");
	avformat_open_input(&pFormatCtx,"video=Integrated Camera",iformat,&options);
	printf("================================\n");
}

//Show VFW Device
void show_vfw_device(){
	AVFormatContext *pFormatCtx = avformat_alloc_context();
	AVInputFormat *iformat = av_find_input_format("vfwcap");
	printf("========VFW Device Info======\n");
	avformat_open_input(&pFormatCtx,"list",iformat,NULL);
	printf("=============================\n");
}

//Show AVFoundation Device
void show_avfoundation_device(){
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVDictionary* options = NULL;
    av_dict_set(&options,"list_devices","true",0);
    AVInputFormat *iformat = av_find_input_format("avfoundation");
    printf("==AVFoundation Device Info===\n");
    avformat_open_input(&pFormatCtx,"",iformat,&options);
    printf("=============================\n");
}


int main(int argc, char* argv[])
{

	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	//Open File
	//char filepath[]="src01_480x272_22.h265";
	//avformat_open_input(&pFormatCtx,filepath,NULL,NULL)

	//Register Device
	avdevice_register_all();

//Windows
#ifdef _WIN32

	//Show Dshow Device
	show_dshow_device();
	//Show Device Options
	show_dshow_device_option();
    //Show VFW Options
    show_vfw_device();

#if USE_DSHOW
	AVInputFormat *ifmt=av_find_input_format("dshow");
	//Set own video device's name
	if(avformat_open_input(&pFormatCtx,"video=Integrated Camera",ifmt,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
#else
	AVInputFormat *ifmt=av_find_input_format("vfwcap");
	if(avformat_open_input(&pFormatCtx,"0",ifmt,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
#endif
#elif defined(__linux__)
    //Linux
	AVInputFormat *ifmt=av_find_input_format("video4linux2");
	if(avformat_open_input(&pFormatCtx,"/dev/video0",ifmt,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
#else
    //Mac
    show_avfoundation_device();
    AVInputFormat *ifmt=av_find_input_format("avfoundation");
    //Avfoundation
    //[video]:[audio]
    if(avformat_open_input(&pFormatCtx,"0",ifmt,NULL)!=0){
        printf("Couldn't open input stream.\n");
        return -1;
    }
#endif


	if(avformat_find_stream_info(pFormatCtx,NULL)<0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex=i;
			break;
		}
	if(videoindex==-1)
	{
		printf("Couldn't find a video stream.\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL)
	{
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
	{
		printf("Could not open codec.\n");
		return -1;
	}
	AVFrame	*pFrame,*pFrameYUV;
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	//unsigned char *out_buffer=(unsigned char *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	//avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	//SDL----------------------------
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf( "Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	int screen_w=0,screen_h=0;
	SDL_Surface *screen;
	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	screen = SDL_SetVideoMode(screen_w, screen_h, 0,0);

	if(!screen) {
		printf("SDL: could not set video mode - exiting:%s\n",SDL_GetError());
		return -1;
	}
	SDL_Overlay *bmp;
	bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,SDL_YV12_OVERLAY, screen);
	SDL_Rect rect;
	rect.x = 0;
	rect.y = 0;
	rect.w = screen_w;
	rect.h = screen_h;
	//SDL End------------------------
	int ret, got_picture;

	AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));

#if OUTPUT_YUV420P
    FILE *fp_yuv=fopen("output.yuv","wb+");
#endif

	struct SwsContext *img_convert_ctx;
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	//------------------------------
	SDL_Thread *video_tid = SDL_CreateThread(sfp_refresh_thread,NULL);
	//
	SDL_WM_SetCaption("Simplest FFmpeg Read Camera",NULL);
	//Event Loop
	SDL_Event event;

	for (;;) {
		//Wait
		SDL_WaitEvent(&event);
		if(event.type==SFM_REFRESH_EVENT){
			//------------------------------
			if(av_read_frame(pFormatCtx, packet)>=0){
				if(packet->stream_index==videoindex){
					ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
					if(ret < 0){
						printf("Decode Error.\n");
						return -1;
					}
					if(got_picture){
						SDL_LockYUVOverlay(bmp);
						pFrameYUV->data[0]=bmp->pixels[0];
						pFrameYUV->data[1]=bmp->pixels[2];
						pFrameYUV->data[2]=bmp->pixels[1];
						pFrameYUV->linesize[0]=bmp->pitches[0];
						pFrameYUV->linesize[1]=bmp->pitches[2];
						pFrameYUV->linesize[2]=bmp->pitches[1];
						sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
						int y_size=pCodecCtx->width*pCodecCtx->height;
						fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y
						fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
						fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif

						SDL_UnlockYUVOverlay(bmp);

						SDL_DisplayYUVOverlay(bmp, &rect);

					}
				}
				av_free_packet(packet);
			}else{
				//Exit Thread
				thread_exit=1;
			}
		}else if(event.type==SDL_QUIT){
			thread_exit=1;
		}else if(event.type==SFM_BREAK_EVENT){
			break;
		}

	}


	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P
    fclose(fp_yuv);
#endif

	SDL_Quit();

	//av_free(out_buffer);
	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}

This walkthrough follows the Linux branch of the program. Overall, the program does two jobs:

  1. Read Camera: open the video device (/dev/video0), then read and decode video packets;
  2. SDL Display: scale and pixel-format-convert the decoded raw video, and display the converted raw data through a simple SDL-based video player.

(For the SDL player part, see the original author's post implementing the simplest FFMPEG+SDL video player in 100 lines of code (SDL1.x).)

(For explanations of the structures and APIs used in the code, see the original author's series "[Summary] Learning FFMPEG audio/video encoding and decoding from scratch".)

 

Read Camera (a condensed sketch follows this list):

  • avformat_alloc_context() allocates the handle pFormatCtx; all subsequent reading and decoding of video data goes through this handle;
  • Three calls open the video device and fully populate pFormatCtx: avdevice_register_all(); av_find_input_format("video4linux2"); avformat_open_input(&pFormatCtx,"/dev/video0",ifmt,NULL);
  • avformat_find_stream_info(pFormatCtx, NULL) reads a small amount of audio/video data to probe the stream information;
  • Walk the streams in pFormatCtx to find the video stream: pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO;
  • pCodec=avcodec_find_decoder(pCodecCtx->codec_id): look up the decoder for the video stream (what the camera delivers is of course raw data; check this stream's AVCodecID and you will find it is indeed AV_CODEC_ID_RAWVIDEO);
  • Open the video stream's decoder: avcodec_open2(pCodecCtx, pCodec,NULL);
  • Allocate two AVFrame buffers, pFrame and pFrameYUV, holding respectively the raw data decoded from the video stream and the converted raw data (the conversion is mainly of pixel format and video size, described in the SDL Display part);
  • Allocate one AVPacket buffer, packet, for the "encoded" data read from the device (AV_CODEC_ID_RAWVIDEO); since what we read is already raw data, "encoded data" is used loosely below;
  • Read the encoded data: av_read_frame(pFormatCtx, packet);
  • Decode the data in packet into pFrame: avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet).
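To see the Read Camera half in isolation, here is a minimal sketch of the same call sequence (same legacy FFmpeg API as the listing, Linux/video4linux2 path, SDL left out) that opens /dev/video0 and decodes a single frame; it is a condensed rearrangement of the listing above, not a different method:

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>

int main(void)
{
	av_register_all();
	avdevice_register_all();

	//Open the camera through the video4linux2 input device
	AVFormatContext *fmt = avformat_alloc_context();
	AVInputFormat *ifmt = av_find_input_format("video4linux2");
	if (avformat_open_input(&fmt, "/dev/video0", ifmt, NULL) != 0)
		return -1;
	if (avformat_find_stream_info(fmt, NULL) < 0)
		return -1;

	//Find the video stream and open its decoder
	//(for a camera the codec is typically AV_CODEC_ID_RAWVIDEO)
	int vindex = -1;
	for (unsigned int i = 0; i < fmt->nb_streams; i++)
		if (fmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			vindex = i;
			break;
		}
	if (vindex == -1)
		return -1;
	AVCodecContext *dec = fmt->streams[vindex]->codec;
	if (avcodec_open2(dec, avcodec_find_decoder(dec->codec_id), NULL) < 0)
		return -1;

	//Read packets from the device until one frame has been decoded
	AVFrame *frame = av_frame_alloc();
	AVPacket packet;
	int got_picture = 0;
	while (!got_picture && av_read_frame(fmt, &packet) >= 0) {
		if (packet.stream_index == vindex)
			avcodec_decode_video2(dec, frame, &got_picture, &packet);
		av_free_packet(&packet);
	}
	if (got_picture)
		printf("Got one %dx%d frame, pix_fmt=%d\n",
		       dec->width, dec->height, dec->pix_fmt);

	av_frame_free(&frame);
	avcodec_close(dec);
	avformat_close_input(&fmt);
	return 0;
}

Link it against libavformat, libavcodec, libavdevice and libavutil, just like the full example.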

SDL Display:

  • Create the window: screen = SDL_SetVideoMode(screen_w, screen_h, 0,0);
  • Create the overlay (pixel format SDL_YV12_OVERLAY, which is exactly the pixel format the decoded frames must be converted to); its YUV data lives in the three buffers of bmp: bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,SDL_YV12_OVERLAY, screen);
  • Initialize a SwsContext handle, img_convert_ctx, which records how the video size (a.k.a. resolution) and pixel format are to be converted (from source size A to destination size B, and from format A to AV_PIX_FMT_YUV420P, the planar counterpart of the SDL_YV12_OVERLAY format above; all of this is directly visible in the API's parameters): img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  • Perform the video size and pixel format conversion; the converted data lands in pFrameYUV (equivalently in bmp, since the two share the same memory; a standalone-buffer variant is sketched after this list): sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
  • Show the data in the window: SDL_DisplayYUVOverlay(bmp, &rect);
  • Finally, a note on the snippet below: SDL_YV12_OVERLAY and AV_PIX_FMT_YUV420P store their chroma planes in opposite orders (YV12 is Y, V, U while YUV420P is Y, U, V), so the U and V plane pointers are swapped when mapping the overlay's buffers into pFrameYUV.
    pFrameYUV->data[0]=bmp->pixels[0];
    pFrameYUV->data[1]=bmp->pixels[2];
    pFrameYUV->data[2]=bmp->pixels[1];
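The commented-out out_buffer lines in the listing show the alternative to borrowing the overlay's memory: give pFrameYUV a private buffer and sws_scale into that instead. A minimal sketch of that variant, using the same legacy helpers the listing comments out (avpicture_get_size/avpicture_fill):

//Standalone conversion target, not backed by the SDL overlay
unsigned char *out_buffer = (unsigned char *)av_malloc(
	avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
	pCodecCtx->width, pCodecCtx->height);

//After each decoded frame, convert into the private buffer
sws_scale(img_convert_ctx,
	(const unsigned char* const*)pFrame->data, pFrame->linesize,
	0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
//pFrameYUV->data[0..2] now hold contiguous Y, U and V planes
//(Y is width*height bytes, U and V width*height/4 each), which is
//exactly what the OUTPUT_YUV420P branch writes to output.yuv

When the frame shares the overlay's pixels instead, as in the main listing, the planes are only valid between SDL_LockYUVOverlay and SDL_UnlockYUVOverlay.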