Example: muxing an H.264 stream into an AVI video file with GStreamer
It is time to write down how to use the GStreamer library to mux an H.264 elementary stream into AVI, MP4, FLV and other video file formats. The flowchart of this example's muxing process is omitted here, and the H.264 capture and queue implementation code is not pasted either. The overall idea: the video-muxing main thread creates one GStreamer muxing pipeline thread per video data channel, and all remaining muxing and file-writing work is done in the callbacks of the pipeline's appsrc and appsink. Synchronization between the muxing main thread and the GStreamer pipeline thread is handled by the following flag and semaphores:
guint record_flag;
sem_t frame_put;
sem_t frame_get;
sem_t record_on;
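Since the capture and queue code is not part of this post, here is a minimal sketch of the hand-off a feeding thread is assumed to perform against the semaphores above; dequeue_frame() and the recording flag are hypothetical placeholders, not names from the original code:

/* hypothetical producer loop for channel 0: hand one frame at a time to the
 * pipeline and wait until start_feed() has consumed it before reusing it */
sem_wait(&ch0_AviMuxGst.record_on);     /* block until the pipeline is PLAYING */
while (recording)                       /* hypothetical stop condition */
{
    dequeue_frame(&video_rec_msg);      /* hypothetical: fills frame/used_size */
    sem_post(&ch0_AviMuxGst.frame_put); /* a frame is ready for appsrc */
    sem_wait(&ch0_AviMuxGst.frame_get); /* wait until start_feed() took it */
}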
Notes on using some of the gstreamer-1.0 elements:
- appsrc: every H.264 frame has a different length, so the data length must be specified again on every push into the element.
- When a video file is finished, an end-of-stream signal must be sent through appsrc; a file that is not closed properly has incomplete header/index information and cannot be played.
- When using splitmuxsink, its muxer property cannot be set to flvmux: splitmuxsink requests its video pad dynamically with the video_%u template, whereas flvmux only offers a pad named video, so the two do not match.
- Different mux elements accept different H.264 input formats: some want byte-stream, others avc. The difference is that in avc format each 00 00 00 01 start code must be replaced by the length of the NAL that follows it, stored big-endian (high byte first) in the four bytes that held the start code, so for typical frame sizes the significant bytes land in bytes 1-3, counting from 0. The length does not include the delimiter itself. Pay particular attention to video "I" frames, which contain SPS, PPS and the I-slice: every one of their start codes must be replaced by a length. A conversion sketch follows this list.
- The h264parse element has a problem: for video "I" frames around 50 KB in size, data is lost when converting from byte-stream to avc format.
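To make the byte-stream/avc note concrete, here is a minimal sketch (my own illustration, not code from this example) that rewrites an access unit in place, replacing each 00 00 00 01 delimiter with the big-endian length of the NAL that follows it. It assumes the buffer starts with a 4-byte start code and that all start codes are 4 bytes, as described above:

#include <stddef.h>
#include <stdint.h>

static void annexb_to_avc(uint8_t *buf, size_t size)
{
    size_t i = 0;
    while (i + 4 <= size)
    {
        /* the NAL payload starts right after the current 4-byte start code */
        size_t nal_start = i + 4;
        size_t next = nal_start;
        /* scan for the next 4-byte start code (or the end of the buffer) */
        while (next + 4 <= size &&
               !(buf[next] == 0 && buf[next + 1] == 0 &&
                 buf[next + 2] == 0 && buf[next + 3] == 1))
            next++;
        if (next + 4 > size)
            next = size;                      /* last NAL runs to the end */
        uint32_t nal_len = (uint32_t)(next - nal_start); /* excludes delimiter */
        /* overwrite the start code with the big-endian NAL length */
        buf[i]     = (nal_len >> 24) & 0xff;
        buf[i + 1] = (nal_len >> 16) & 0xff;
        buf[i + 2] = (nal_len >> 8)  & 0xff;
        buf[i + 3] = nal_len & 0xff;
        i = next;
    }
}

The complete example source follows.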
#define _GNU_SOURCE /* for CPU_ZERO/CPU_SET and pthread_setaffinity_np */
#include <sched.h>
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/un.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/fb.h>
#include <signal.h>
#include <pthread.h>
#include <semaphore.h>
#include <errno.h>
#include "media.h"
#include "queue.h"
typedef struct _GstDataStruct{
GstElement *pipeline;
GstElement *appsrc;
GstElement *appsink;
//GstElement *h264parse;
//GstElement *muxfile;
GstElement *avimux;
guint sourceid;
guint appsrc_index;
guint appsink_index;
guint bus_watch_id;
GstBus *bus;
GMainLoop *loop; // GLib's Main Loop
REC_MSG *rec_msg;
guint record_flag;
guint ch;
sem_t frame_put;
sem_t frame_get;
sem_t record_on;
unsigned int width;
unsigned int height;
unsigned int fps;
char* filename;
FILE *vfile;
} MuxGstDataStruct;
#define RECORD_DIR "/mnt/httpsrv/av_record/"
#define RECORD_TIME_SEC (3 * 60)
extern unsigned long q_record;
extern int start_wait_iframe0;
extern int start_wait_iframe1;
extern int start_wait_iframe2;
extern int start_wait_iframe3;
extern int had_camera_3;
extern char startup_ID[64];
extern unsigned int bytes_per_frame;
extern int video_base_ts_uninit;
REC_MSG video_rec_msg;
MuxGstDataStruct ch0_AviMuxGst;
MuxGstDataStruct ch1_AviMuxGst;
MuxGstDataStruct ch2_AviMuxGst;
MuxGstDataStruct ch3_AviMuxGst;
unsigned int ch0_online;
unsigned int ch0_width;
unsigned int ch0_height;
unsigned int ch0_fps;
unsigned int ch1_online;
unsigned int ch1_width;
unsigned int ch1_height;
unsigned int ch1_fps;
unsigned int ch2_online;
unsigned int ch2_width;
unsigned int ch2_height;
unsigned int ch2_fps;
unsigned int ch3_online;
unsigned int ch3_width;
unsigned int ch3_height;
unsigned int ch3_fps;
#define MAX_FILE_NAME (96)
char filename0[MAX_FILE_NAME] = {0};
char filename1[MAX_FILE_NAME] = {0};
char filename2[MAX_FILE_NAME] = {0};
char filename3[MAX_FILE_NAME] = {0}; // add by luke zhao 2018.6.14, used for 360 video
static gboolean avi_mux_bus_msg_call(GstBus *bus, GstMessage *msg, MuxGstDataStruct *pAviMuxGst)
{
gchar *debug;
GError *error;
GMainLoop *loop = pAviMuxGst->loop;
GST_DEBUG ("ch:%d, got message %s", pAviMuxGst->ch, gst_message_type_get_name (GST_MESSAGE_TYPE (msg)));
switch (GST_MESSAGE_TYPE(msg))
{
case GST_MESSAGE_EOS:
printf("ch:%d, End of stream\n", pAviMuxGst->ch);
fflush(pAviMuxGst->vfile);
fclose(pAviMuxGst->vfile);
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR:
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("ch:%d, Error: %s\n", pAviMuxGst->ch, error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
default:
break;
}
return TRUE;
}
static void start_feed(GstElement * pipeline, guint size, MuxGstDataStruct *pAviMuxGst)
{
GstFlowReturn ret;
GstBuffer *buffer;
GstMemory *memory;
gpointer data;
gsize len;
sem_wait(&pAviMuxGst->frame_put);
if(pAviMuxGst->record_flag == 0)
{
printf("ch:%d, end of stream change to new file!\n", pAviMuxGst->ch);
g_signal_emit_by_name (pAviMuxGst->appsrc, "end-of-stream", &ret);
}
else
{
data = (gpointer)video_rec_msg.frame;
len = (gsize)video_rec_msg.used_size;
// per the notes above: every H264 frame differs in length, so re-specify it on
// each push ("blocksize" is a guint property, so set it as an integer, not a string)
g_object_set(G_OBJECT(pAviMuxGst->appsrc), "blocksize", (guint)len, NULL);
gst_app_src_set_size (GST_APP_SRC(pAviMuxGst->appsrc), len);
//printf("ch:%d, get frame:%p, len:%d!!!!\n", pAviMuxGst->ch, data, (int)len);
pAviMuxGst->appsrc_index++;
buffer = gst_buffer_new();
// wrap the frame without copying; the producer must not overwrite this memory
// until the pipeline is done with the buffer
memory = gst_memory_new_wrapped(GST_MEMORY_FLAG_READONLY, data, len, 0, len, NULL, NULL);
gst_buffer_append_memory (buffer, memory);
// the "push-buffer" action signal does not take ownership, so unref afterwards
g_signal_emit_by_name (pAviMuxGst->appsrc, "push-buffer", buffer, &ret);
gst_buffer_unref(buffer);
}
sem_post(&pAviMuxGst->frame_get);
}
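A side note on the push above: the "push-buffer" action signal does not take ownership of the buffer (hence the unref), while the direct API call does. A minimal equivalent, should you prefer the call form:

/* equivalent push via the appsrc API; gst_app_src_push_buffer() takes
 * ownership of the buffer, so no gst_buffer_unref() afterwards */
GstFlowReturn flow = gst_app_src_push_buffer(GST_APP_SRC(pAviMuxGst->appsrc), buffer);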
static void stop_feed(GstElement * pipeline, MuxGstDataStruct *pAviMuxGst)
{
g_print("ch:%d, stop feed ...................\n", pAviMuxGst->ch);
// if (pMuxGstData->sourceid != 0)
// {
// //GST_DEBUG ("ch:%d, stop feeding...\n", pAviMuxGst->ch);
// g_source_remove (pAviMuxGst->sourceid);
// pAviMuxGst->sourceid = 0;
// }
}
static GstFlowReturn new_sample_on_appsink (GstElement *sink, MuxGstDataStruct *pAviMuxGst)
{
GstSample *sample = NULL;
struct timeval tvl;
gettimeofday(&tvl, NULL);
g_signal_emit_by_name (sink, "pull-sample", &sample);
if(sample)
{
pAviMuxGst->appsink_index++;
GstBuffer *buffer = gst_sample_get_buffer(sample);
GstMapInfo info;
if(gst_buffer_map((buffer), &info, GST_MAP_READ))
{
//printf("ch:%d, mux appsink rcv data len:%d time: %d, index:%d!\n", pAviMuxGst->ch,
// (unsigned int)info.size, (unsigned int)tvl.tv_sec, pAviMuxGst->appsink_index);
fwrite(info.data, info.size, 1, pAviMuxGst->vfile);
fflush(pAviMuxGst->vfile);
gst_buffer_unmap(buffer, &info);
}
gst_sample_unref(sample);
}
return GST_FLOW_OK; // the "new-sample" signal handler must return a GstFlowReturn
}
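For reference, the "pull-sample" action signal emitted above has a direct-call equivalent in the appsink API, which avoids the signal-marshalling overhead:

/* direct-call equivalent of g_signal_emit_by_name(sink, "pull-sample", &sample) */
GstSample *sample = gst_app_sink_pull_sample(GST_APP_SINK(sink));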
static int thr_avi_mux_gst_pipeline(void* args)
{
MuxGstDataStruct *pAviMuxGst;
//char elementname[32] = {0};
cpu_set_t mask;
CPU_ZERO (&mask);
CPU_SET (0, &mask);
pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
re_start:
pAviMuxGst = (MuxGstDataStruct*)args;
printf("============= ch:%d, mux gst init start ============\n", pAviMuxGst->ch);
gst_init (NULL, NULL);
printf("============ ch:%d, create mux pipeline ============\n", pAviMuxGst->ch);
printf("===== ch:%d, width:%d, height:%d, framerate:%d =====\n",
pAviMuxGst->ch, pAviMuxGst->width, pAviMuxGst->height, pAviMuxGst->fps);
pAviMuxGst->pipeline = gst_pipeline_new ("avimux pipeline");
pAviMuxGst->appsrc = gst_element_factory_make ("appsrc", "appsrc");
pAviMuxGst->appsink = gst_element_factory_make ("appsink", "appsink");
//pAviMuxGst->h264parse = gst_element_factory_make ("h264parse", "h264parse");
pAviMuxGst->avimux = gst_element_factory_make ("avimux", "avimux");
//pAviMuxGst->muxfile = gst_element_factory_make ("filesink", "filesink");
if (!pAviMuxGst->appsrc || !pAviMuxGst->avimux || !pAviMuxGst->appsink)
{
g_printerr ("ch:%d:not all element could be created... Exit\n", pAviMuxGst->ch);
printf("ch:%d, appsrc:%p, mux:%p, appsink:%p !!\n", pAviMuxGst->ch,
pAviMuxGst->appsrc, pAviMuxGst->avimux, pAviMuxGst->appsink);
return -1;
}
printf("============= ch:%d, link mux pipeline =============\n", pAviMuxGst->ch);
g_object_set(G_OBJECT(pAviMuxGst->appsrc), "stream-type", 0, "format", GST_FORMAT_TIME, NULL);
g_object_set(G_OBJECT(pAviMuxGst->appsrc), "min-percent", 0, NULL);
GstCaps *caps_h264_byte;
caps_h264_byte = gst_caps_new_simple("video/x-h264", "stream-format", G_TYPE_STRING,"byte-stream",
"alignment", G_TYPE_STRING, "au",
"width", G_TYPE_INT, pAviMuxGst->width,
"height", G_TYPE_INT, pAviMuxGst->height,
"framerate",GST_TYPE_FRACTION, pAviMuxGst->fps, 1, NULL);
g_object_set(G_OBJECT(pAviMuxGst->appsrc), "caps", caps_h264_byte, NULL);
g_signal_connect(pAviMuxGst->appsrc, "need-data", G_CALLBACK(start_feed), (gpointer)pAviMuxGst);
g_signal_connect(pAviMuxGst->appsrc, "enough-data", G_CALLBACK(stop_feed), (gpointer)pAviMuxGst);
// g_object_set(G_OBJECT(pAviMuxGst->muxfile), "location", pAviMuxGst->filename,
// "sync", FALSE, "buffer-mode", 2, NULL);
g_object_set(G_OBJECT(pAviMuxGst->appsink), "emit-signals", TRUE, "sync", FALSE, "async", FALSE, NULL);
g_signal_connect(pAviMuxGst->appsink, "new-sample", G_CALLBACK(new_sample_on_appsink), pAviMuxGst);
gst_bin_add_many(GST_BIN(pAviMuxGst->pipeline),pAviMuxGst->appsrc, pAviMuxGst->avimux, pAviMuxGst->appsink, NULL);
if(gst_element_link_filtered(pAviMuxGst->appsrc, pAviMuxGst->avimux, caps_h264_byte) != TRUE)
{
g_printerr ("ch:%d, pAviMuxGst->appsrc could not link pAviMuxGst->avimux\n", pAviMuxGst->ch);
gst_object_unref (pAviMuxGst->pipeline);
return -1;
}
if(gst_element_link(pAviMuxGst->avimux, pAviMuxGst->appsink) != TRUE)
{
g_printerr ("ch:%d, pAviMuxGst->h264parse could not link pAviMuxGst->appsink\n", pAviMuxGst->ch);
gst_object_unref (pAviMuxGst->pipeline);
return -1;
}
gst_caps_unref (caps_h264_byte);
pAviMuxGst->bus = gst_pipeline_get_bus(GST_PIPELINE(pAviMuxGst->pipeline));
pAviMuxGst->bus_watch_id = gst_bus_add_watch(pAviMuxGst->bus, (GstBusFunc)avi_mux_bus_msg_call, (gpointer)pAviMuxGst);
gst_object_unref(pAviMuxGst->bus);
printf("=========== ch:%d, link mux pipeline ok ! ============\n", pAviMuxGst->ch);
printf("======== ch:%d, mux pipeline start to playing! =======\n", pAviMuxGst->ch);
pAviMuxGst->record_flag = 1;
sem_post(&pAviMuxGst->record_on);
gst_element_set_state (pAviMuxGst->pipeline, GST_STATE_PLAYING);
pAviMuxGst->loop = g_main_loop_new(NULL, FALSE); // Create gstreamer loop
g_main_loop_run(pAviMuxGst->loop); // Loop will run until receiving EOS (end-of-stream), will block here
printf("== ch:%d, g_main_loop_run returned, stopping record ==\n", pAviMuxGst->ch);
gst_element_set_state (pAviMuxGst->pipeline, GST_STATE_NULL); // Stop pipeline to be released
printf("============ ch:%d, deleting mux pipeline ============\n", pAviMuxGst->ch);
gst_object_unref (pAviMuxGst->pipeline); // THis will also delete all pipeline elements
g_source_remove(pAviMuxGst->bus_watch_id);
g_main_loop_unref(pAviMuxGst->loop);
if(pAviMuxGst->record_flag == 0)
{
printf("======= ch:%d, pipeline going to restart! =======\n", pAviMuxGst->ch);
goto re_start;
}
return 0;
}
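Stopping or rotating a file follows from the logic above: when record_flag is cleared, the next need-data callback sends end-of-stream instead of a frame, the EOS bus message quits the main loop, and the re_start label rebuilds the pipeline for the next file. A minimal sketch of how a controller thread might trigger this (stop_record() is a hypothetical helper, not part of the original code):

/* hypothetical helper: finish the current file for one channel */
static void stop_record(MuxGstDataStruct *pGst)
{
    pGst->record_flag = 0;       /* start_feed() will emit end-of-stream */
    sem_post(&pGst->frame_put);  /* wake start_feed() so it sees the flag */
}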
void *thr_record(void *arg)
{
int ret;
unsigned int frame_size;
FILE *afp0 = NULL;
FILE *afp1 = NULL;
unsigned int count0 = 0;
unsigned int count1 = 0;
unsigned int count2 = 0;
unsigned int count3 = 0; // add by luke zhao 2018.6.14, used for 360 video
int file_count0 = 0;
int file_count1 = 0;
int file_count2 = 0;
int file_count3 = 0; // add by luke zhao 2018.6.14, used for 360 video
int record_wait_iframe0 = 0;
int record_wait_iframe1 = 0;
int record_wait_iframe2 = 0;
int record_wait_iframe3 = 0; // add by luke zhao 2018.6.14, used for 360 video
char audiofilename0[MAX_FILE_NAME] = {0};
char audiofilename1[MAX_FILE_NAME] = {0};
char filename0_index[128] = {0};
char filename1_index[128] = {0};
char filename2_index[128] = {0};
char filename3_index[128] = {0}; // add by luke zhao 2018.6.14, used for 360 video
struct media_header head = {0};
struct timeval tvl;
struct timeval ch0_video_base_time;
struct timeval ch1_video_base_time;
struct timeval ch2_video_base_time;
struct timeval ch3_video_base_time;
int audio0_fsize = 0;
int audio1_fsize = 0;
int audio0_stop = 0;
int audio1_stop = 0;
pthread_t tid;
cpu_set_t mask;
CPU_ZERO (&mask);
CPU_SET (0, &mask);
pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
memset((unsigned char*)&ch0_AviMuxGst, 0, sizeof(MuxGstDataStruct));
memset((unsigned char*)&ch1_AviMuxGst, 0, sizeof(MuxGstDataStruct));
memset((unsigned char*)&ch2_AviMuxGst, 0, sizeof(MuxGstDataStruct));
memset((unsigned char*)&ch3_AviMuxGst, 0, sizeof(MuxGstDataStruct));
sem_init(&ch0_AviMuxGst.frame_put, 0, 0);
sem_init(&ch0_AviMuxGst.frame_get, 0, 0);