
ffplay + Qt Player

Introduction

Building ffplay (from FFmpeg 3.3.3) with Qt Creator 4.4.1 on Windows:

  • Command-line argument parsing has been removed; for now the file path is hard-coded in the source.
  • Key events are supported, e.g. the left/right arrow keys seek and the space bar toggles pause.
  • The point of building ffplay inside Qt is to make debugging convenient, so that structures such as AVPacket and AVFrame can be inspected in the debugger.

ffplay module breakdown

  • Input parsing
  • Stream opening
  • Audio/video packet queues
  • Audio/video decoding
  • Playback control
    • Start playback
    • Stop playback
    • Pause playback
    • Seek to a given position
  • Audio output
  • Video output
  • ffplay hotkey support (see the sketch below)
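The playback-control and hotkey modules meet in an SDL event loop: ffplay's event_loop() polls SDL for key events and turns them into pause/seek requests. Below is a minimal, self-contained sketch of that pattern, not ffplay's actual code; toggle_pause() and do_seek() are hypothetical stand-ins for ffplay's stream_toggle_pause()/stream_seek(), and the window exists only so SDL can receive keyboard focus.

#include <SDL.h>
#include <stdio.h>

static int paused = 0;     /* stand-in for VideoState.paused */
static double pos = 0.0;   /* stand-in for the current playback position */

static void toggle_pause(void) { paused = !paused; printf("paused=%d\n", paused); }
static void do_seek(double incr) { pos += incr; printf("seek to %.1fs\n", pos); }

int main(int argc, char *argv[])
{
    SDL_Event event;
    if (SDL_Init(SDL_INIT_VIDEO))
        return 1;
    SDL_Window *win = SDL_CreateWindow("demo", SDL_WINDOWPOS_CENTERED,
                                       SDL_WINDOWPOS_CENTERED, 320, 240, 0);
    for (;;) {
        SDL_WaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN:
            switch (event.key.keysym.sym) {
            case SDLK_SPACE:  toggle_pause(); break; /* space: pause/resume */
            case SDLK_LEFT:   do_seek(-10.0); break; /* left: seek back 10 s */
            case SDLK_RIGHT:  do_seek(+10.0); break; /* right: seek forward 10 s */
            case SDLK_ESCAPE: goto done;
            }
            break;
        case SDL_QUIT:
            goto done;
        }
    }
done:
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}

In ffplay itself the key handler only records a seek request (seek_req/seek_pos in VideoState); the read thread then performs the actual avformat_seek_file() call.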

Development environment

  • Windows 7
  • Qt 5.9.2 + Qt Creator 4.4.1
  • Third-party libraries (see the qmake sketch below)
    • FFmpeg, for reading and decoding the stream
    • SDL2, for audio playback and video display
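Qt Creator drives the build through qmake, so the project file has to point at the FFmpeg and SDL2 development packages. The sketch below is an assumption about the layout, not the article's actual project file: FFMPEG_DIR, SDL2_DIR, and the source file name are placeholders to adapt to your own directories.

# Hypothetical qmake project sketch for building ffplay in Qt Creator.
TEMPLATE = app
CONFIG  += console
CONFIG  -= app_bundle

# The extern "C" guards in the source allow building it as C++.
SOURCES += ffplay.cpp

# Adjust these to wherever the FFmpeg 3.3.3 and SDL2 dev packages live.
FFMPEG_DIR = $$PWD/ffmpeg-3.3.3
SDL2_DIR   = $$PWD/SDL2

INCLUDEPATH += $$FFMPEG_DIR/include $$SDL2_DIR/include

LIBS += -L$$FFMPEG_DIR/lib -lavformat -lavcodec -lavutil \
        -lswscale -lswresample -lavdevice
LIBS += -L$$SDL2_DIR/lib -lSDL2 -lSDL2main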

Code

Download link

Build and run

Copy the DLLs, the built executable, and a video file into the program's working directory. The file to play can also be given any other location, for example:

char fileName[256] = {"G:\\迅雷下載\\繡春刀II:修羅戰場.HD.720p.國語中字.mkv"};

Mind the backslash ("\") escaping in the path.
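For illustration, both spellings below refer to the same (made-up) file; the doubled backslash is required inside a C string literal, while forward slashes work unescaped on Windows:

#include <stdio.h>

int main(void)
{
    /* In a C string literal each backslash must be doubled; forward
       slashes are also accepted by the Windows APIs and need no escaping. */
    char fileName1[256] = "G:\\video\\test.mkv";
    char fileName2[256] = "G:/video/test.mkv";
    printf("%s\n%s\n", fileName1, fileName2);
    return 0;
}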


Code preview

/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * simple media player based on the FFmpeg libraries
 */

//#include "config.h"
#include <inttypes.h>
#include <math.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/dict.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavutil/display.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libswresample/swresample.h"

#define CONFIG_AVFILTER 0
#if CONFIG_AVFILTER
# include "libavfilter/avfilter.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
#endif

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __cplusplus
}
#endif

/* the author forces _WIN32 so the Windows-only include below is always used */
#define _WIN32
#ifdef _WIN32
#include <windows.h>
#endif

#include <assert.h>

AVDictionary *sws_dict;
AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;

const char program_name[] = "ffplay";
const int program_birth_year = 2003;

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 25
#define EXTERNAL_CLOCK_MIN_FRAMES 2
#define EXTERNAL_CLOCK_MAX_FRAMES 10

/* Minimum SDL audio buffer size, in samples. */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30

/* Step size for volume control in dB */
#define SDL_VOLUME_STEP (0.75)

/* no AV sync correction is done if below the minimum AV sync threshold */
#define AV_SYNC_THRESHOLD_MIN 0.04
/* AV sync correction is done if above the maximum AV sync threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* external clock speed adjustment constants for realtime sources based on buffer fullness */
#define EXTERNAL_CLOCK_SPEED_MIN  0.900
#define EXTERNAL_CLOCK_SPEED_MAX  1.010
#define EXTERNAL_CLOCK_SPEED_STEP 0.001

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* polls for possible required screen refresh at least this often, should be less than 1/fps */
#define REFRESH_RATE 0.01

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
/* TODO: We assume that a decoded and resampled frame fits into this buffer */
#define SAMPLE_ARRAY_SIZE (8 * 65536)

#define CURSOR_HIDE_DELAY 1000000

#define USE_ONEPASS_SUBTITLE_RENDER 1

static unsigned sws_flags = SWS_BICUBIC;

typedef struct MyAVPacketList {
    AVPacket pkt;
    struct MyAVPacketList *next;
    int serial;
} MyAVPacketList;

typedef struct PacketQueue {
    MyAVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int64_t duration;
    int abort_request;
    int serial;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE 16
#define SAMPLE_QUEUE_SIZE 9
#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))

typedef struct AudioParams {
    int freq;
    int channels;
    int64_t channel_layout;
    enum AVSampleFormat fmt;
    int frame_size;
    int bytes_per_sec;
} AudioParams;

typedef struct Clock {
    double pts;           /* clock base */
    double pts_drift;     /* clock base minus time at which we updated the clock */
    double last_updated;
    double speed;
    int serial;           /* clock is based on a packet with this serial */
    int paused;
    int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
} Clock;

/* Common struct for handling all types of decoded data and allocated render buffers. */
typedef struct Frame {
    AVFrame *frame;
    AVSubtitle sub;
    int serial;
    double pts;           /* presentation timestamp for the frame */
    double duration;      /* estimated duration of the frame */
    int64_t pos;          /* byte position of the frame in the input file */
    int width;
    int height;
    int format;
    AVRational sar;
    int uploaded;
    int flip_v;
} Frame;

typedef struct FrameQueue {
    Frame queue[FRAME_QUEUE_SIZE];
    int rindex;
    int windex;
    int size;
    int max_size;
    int keep_last;
    int rindex_shown;
    SDL_mutex *mutex;
    SDL_cond *cond;
    PacketQueue *pktq;
} FrameQueue;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

// Decoder
typedef struct Decoder {
    AVPacket pkt;
    AVPacket pkt_temp;
    PacketQueue *queue;
    AVCodecContext *avctx;
    int pkt_serial;
    int finished;
    int packet_pending;
    SDL_cond *empty_queue_cond;
    int64_t start_pts;
    AVRational start_pts_tb;
    int64_t next_pts;
    AVRational next_pts_tb;
    SDL_Thread *decoder_tid;
} Decoder;

enum ShowMode {
    SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
};

typedef struct VideoState {
    SDL_Thread *read_tid;
    AVInputFormat *iformat;
    int abort_request;
    int force_refresh;
    int paused;
    int last_paused;
    int queue_attachments_req;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int realtime;

    Clock audclk;
    Clock vidclk;
    Clock extclk;

    FrameQueue pictq;
    FrameQueue subpq;
    FrameQueue sampq;

    Decoder auddec;
    Decoder viddec;
    Decoder subdec;

    int audio_stream;

    int av_sync_type;

    double audio_clock;
    int audio_clock_serial;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size; /* in bytes */
    unsigned int audio_buf1_size;
    int audio_buf_index; /* in bytes */
    int audio_write_buf_size;
    int audio_volume;
    int muted;
    struct AudioParams audio_src;
#if CONFIG_AVFILTER
    struct AudioParams audio_filter_src;
#endif
    struct AudioParams audio_tgt;
    struct SwrContext *swr_ctx;
    int frame_drops_early;
    int frame_drops_late;

    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;
    double last_vis_time;
    SDL_Texture *vis_texture;
    SDL_Texture *sub_texture;
    SDL_Texture *vid_texture;

    int subtitle_stream;
    AVStream *subtitle_st;
    PacketQueue subtitleq;

    double frame_timer;
    double frame_last_returned_time;
    double frame_last_filter_delay;
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
    struct SwsContext *img_convert_ctx;
    struct SwsContext *sub_convert_ctx;
    int eof;

    char *filename;
    int width, height, xleft, ytop;
    int step;

#if CONFIG_AVFILTER
    int vfilter_idx;
    AVFilterContext *in_video_filter;   // the first filter in the video chain
    AVFilterContext *out_video_filter;  // the last filter in the video chain
    AVFilterContext *in_audio_filter;   // the first filter in the audio chain
    AVFilterContext *out_audio_filter;  // the last filter in the audio chain
    AVFilterGraph *agraph;              // audio filter graph
#endif

    int last_video_stream, last_audio_stream, last_subtitle_stream;

    SDL_cond *continue_read_thread;
    enum ShowMode show_mode;
} VideoState;

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int default_width  = 640;
static int default_height = 480;
static int screen_width  = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int subtitle_disable;
static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
static int seek_by_bytes = -1;
static int display_disable;
static int borderless;
static int startup_volume = 100;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int decoder_reorder_pts = -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop = 1;
static int framedrop = -1;
static int infinite_buffer = -1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static const char *audio_codec_name;
static const char *subtitle_codec_name;
static const char *video_codec_name;
double rdftspeed = 0.02;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
#if CONFIG_AVFILTER
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *afilters = NULL;
#endif
static int autorotate = 1;

/* current context */
static int is_full_screen;
static int64_t audio_callback_time;

/* special "flush" marker packet queued after a seek; it bumps the queue serial */
static AVPacket flush_pkt;

#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Window *window;
static SDL_Renderer *renderer;

void exit_program(int ret)
{
    exit(ret);
}

void *grow_array(void *array, int elem_size, int *size, int new_size)
{
    if (new_size >= INT_MAX / elem_size) {
        av_log(NULL, AV_LOG_ERROR, "Array too big.\n");
        exit_program(1);
    }
    if (*size < new_size) {
        uint8_t *tmp = (uint8_t *)av_realloc_array(array, new_size, elem_size);
        if (!tmp) {
            av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
            exit_program(1);
        }
        memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size);
        *size = new_size;
        return tmp;
    }
    return array;
}

#define GROW_ARRAY(array, nb_elems)\
    array = (const char **)grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)

#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
    GROW_ARRAY(vfilters_list, nb_vfilters);
    vfilters_list[nb_vfilters - 1] = arg;
    return 0;
}
#endif

int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
{
    int ret = avformat_match_stream_specifier(s, st, spec);
    if (ret < 0)
        av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
    return ret;
}

AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
                                AVFormatContext *s, AVStream *st, AVCodec *codec)
{
    AVDictionary      *ret = NULL;
    AVDictionaryEntry   *t = NULL;
    int              flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
                                        : AV_OPT_FLAG_DECODING_PARAM;
    char            prefix = 0;
    const AVClass      *cc = avcodec_get_class();

    if (!codec)
        codec = s->oformat ? avcodec_find_encoder(codec_id)
                           : avcodec_find_decoder(codec_id);

    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        prefix  = 'v';
        flags  |= AV_OPT_FLAG_VIDEO_PARAM;
        break;
    case AVMEDIA_TYPE_AUDIO:
        prefix  = 'a';
        flags  |= AV_OPT_FLAG_AUDIO_PARAM;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        prefix  = 's';
        flags  |= AV_OPT_FLAG_SUBTITLE_PARAM;
        break;
    }

    while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) {
        char *p = strchr(t->key, ':');

        /* check stream specification in opt name */
        if (p)
            switch (check_stream_specifier(s, st, p + 1)) {
            case  1: *p = 0; break;
            case  0:         continue;
            default:         exit_program(1);
            }

        if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
            !codec ||
            (codec->priv_class &&
             av_opt_find(&codec->priv_class, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ)))
            av_dict_set(&ret, t->key, t->value, 0);
        else if (t->key[0] == prefix &&
                 av_opt_find(&cc, t->key + 1, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ))
            av_dict_set(&ret, t->key + 1, t->value, 0);

        if (p)
            *p = ':';
    }
    return ret;
}

AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
{
    int i;
    AVDictionary **opts;

    if (!s->nb_streams)
        return NULL;
    opts = (AVDictionary **)av_mallocz_array(s->nb_streams, sizeof(*opts));
    if (!opts) {
        av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n");
        return NULL;
    }
    for (i = 0; i < s->nb_streams; i++)
        opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
                                    s, s->streams[i], NULL);
    return opts;
}

double get_rotation(AVStream *st)
{
    uint8_t* displaymatrix = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
    double theta = 0;
    if (displaymatrix)
        theta = -av_display_rotation_get((int32_t*) displaymatrix);

    theta -= 360*floor(theta/360 + 0.9/360);

    if (fabs(theta - 90*round(theta/90)) > 2)
        av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n"
               "If you want to help, upload a sample "
               "of this file to ftp://upload.ffmpeg.org/incoming/ "
               "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)");

    return theta;
}

static inline int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
                                 enum AVSampleFormat fmt2, int64_t channel_count2)
{
    /* If channel count == 1, planar and non-planar formats are the same */
    if (channel_count1 == 1 && channel_count2 == 1)
        return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
    else
        return channel_count1 != channel_count2 || fmt1 != fmt2;
}

static inline int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
{
    if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
        return channel_layout;
    else
        return 0;
}

static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
    MyAVPacketList *pkt1;

    if (q->abort_request)
        return -1;

    pkt1 = (MyAVPacketList *)av_malloc(sizeof(MyAVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    if (pkt == &flush_pkt)
        q->serial++;
    pkt1->serial = q->serial;

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    q->duration += pkt1->pkt.duration;
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);
    return 0;
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    int ret;

    SDL_LockMutex(q->mutex);
    ret = packet_queue_put_private(q, pkt);
    SDL_UnlockMutex(q->mutex);

    if (pkt != &flush_pkt && ret < 0)
        av_packet_unref(pkt);

    return ret;
}

static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
{
    AVPacket pkt1, *pkt = &pkt1;
    av_init_packet(pkt);
    pkt->data = NULL;
    pkt->size = 0;
    pkt->stream_index = stream_index;
    return packet_queue_put(q, pkt);
}

/* packet queue handling */
static int packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    if (!q->mutex) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    q->cond = SDL_CreateCond();
    if (!q->cond) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    q->abort_request = 1;
    return 0;
}

static void packet_queue_flush(PacketQueue *q)
{
    MyAVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt; pkt = pkt1) {
        pkt1 = pkt->next;
        av_packet_unref(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    q->duration = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_destroy(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_start(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 0;
    packet_queue_put_private(q, &flush_pkt);
    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
{
    MyAVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for (;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            q->duration -= pkt1->pkt.duration;
            *pkt = pkt1->pkt;
            if (serial)
                *serial = pkt1->serial;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
{
    memset(d, 0, sizeof(Decoder));
    d->avctx = avctx;
    d->queue = queue;
    d->empty_queue_cond = empty_queue_cond;
    d->start_pts = AV_NOPTS_VALUE;
}

// Decode one frame (video, audio or subtitle)
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
{
    int got_frame = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
            if (got_frame) {
                if (decoder_reorder_pts == -1) {
                    frame->pts = av_frame_get_best_effort_timestamp(frame);
                } else if (!decoder_reorder_pts) {
                    frame->pts = frame->pkt_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
            if (got_frame) {
                AVRational tb = (AVRational){1, frame->sample_rate};
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                else if (d->next_pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                if (frame->pts != AV_NOPTS_VALUE) {
                    d->next_pts = frame->pts + frame->nb_samples;
                    d->next_pts_tb = tb;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
            break;
        }

        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}

static void decoder_destroy(Decoder *d)
{
    av_packet_unref(&d->pkt);
    avcodec_free_context(&d->avctx);
}

static void frame_queue_unref_item(Frame *vp)
{
    av_frame_unref(vp->frame);
    avsubtitle_free(&vp->sub);
}

static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
{
    int i;
    memset(f, 0, sizeof(FrameQueue));
    if (!(f->mutex = SDL_CreateMutex())) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    if (!(f->cond = SDL_CreateCond())) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    f->pktq = pktq;
    f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
    f->keep_last = !!keep_last;
    for (i = 0; i < f->max_size; i++)
        if (!(f->queue[i].frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
    return 0;
}

static void frame_queue_destory(FrameQueue *f)
{
    int i;
    for (i = 0; i < f->max_size; i++) {
        Frame *vp = &f->queue[i];
        frame_queue_unref_item(vp);
        av_frame_free(&vp->frame);
    }
    SDL_DestroyMutex(f->mutex);
    SDL_DestroyCond(f->cond);
}

static void frame_queue_signal(FrameQueue *f)
{
    SDL_LockMutex(f->mutex);
    SDL_CondSignal(f->cond);
    SDL_UnlockMutex(f->mutex);
}

static Frame *frame_queue_peek(FrameQueue *f)
{
    return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
}

static Frame *frame_queue_peek_next(FrameQueue *f)
{
    return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
}

static Frame *frame_queue_peek_last(FrameQueue *f)
{
    return &f->queue[f->rindex];
}

static Frame *frame_queue_peek_writable(FrameQueue *f)
{
    /* wait until we have space to put a new frame */
    SDL_LockMutex(f->mutex);
    while (f->size >= f->max_size &&
           !f->pktq->abort_request) {
        SDL_CondWait(f->cond, f->mutex);
    }
    SDL_UnlockMutex(f->mutex);

    if (f->pktq->abort_request)
        return NULL;

    return &f->queue[f->windex];
}

static Frame *frame_queue_peek_readable(FrameQueue *f)
{
    /* wait until we have a readable new frame */
    SDL_LockMutex(f->mutex);
    while (f->size - f->rindex_shown <= 0 &&
           !f->pktq->abort_request) {
        SDL_CondWait(f->cond, f->mutex);
    }
    SDL_UnlockMutex(f->mutex);

    if (f->pktq->abort_request)
        return NULL;

    return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
}

static void frame_queue_push(FrameQueue *f)
{
    if (++f->windex == f->max_size)
        f->windex = 0;
    SDL_LockMutex(f->mutex);
    f->size++;
    SDL_CondSignal(f->cond);
    SDL_UnlockMutex(f->mutex);
}

static void frame_queue_next(FrameQueue *f)
{
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;
        return;
    }
    frame_queue_unref_item(&f->queue[f->rindex]);
    if (++f->rindex == f->max_size)
        f->rindex = 0;
    SDL_LockMutex(f->mutex);
    f->size--;
    SDL_CondSignal(f->cond);
    SDL_UnlockMutex(f->mutex);
}

/* return the number of undisplayed frames in the queue */
static int frame_queue_nb_remaining(FrameQueue *f)
{
    return f->size - f->rindex_shown;
}

/* return last shown position */
static int64_t frame_queue_last_pos(FrameQueue *f)
{
    Frame *fp = &f->queue[f->rindex];
    if (f->rindex_shown && fp->serial == f->pktq->serial)
        return fp->pos;
    else
        return -1;
}

static void decoder_abort(Decoder *d, FrameQueue *fq)
{
    packet_queue_abort(d->queue);
    frame_queue_signal(fq);
    SDL_WaitThread(d->decoder_tid, NULL);
    d->decoder_tid = NULL;
    packet_queue_flush(d->queue);
}

static inline void fill_rectangle(int x, int y, int w, int h)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    if (w && h)
        SDL_RenderFillRect(renderer, &rect);
}

static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
{
    Uint32 format;
    int access, w, h;
    if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
        void *pixels;
        int pitch;
        SDL_DestroyTexture(*texture);
        if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
            return -1;
        if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
            return -1;
        if (init_texture) {
            if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
                return -1;
            memset(pixels, 0, pitch * new_height);
            SDL_UnlockTexture(*texture);
        }
    }
    return 0;
}

static void calculate_display_rect(SDL_Rect *rect,
                                   int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                   int pic_width, int pic_height, AVRational pic_sar)
{
    float aspect_ratio;
    int width, height, x, y;

    if (pic_sar.num == 0)
        aspect_ratio = 0;
    else
        aspect_ratio = av_q2d(pic_sar);

    if (aspect_ratio <= 0.0)
        aspect_ratio = 1.0;
    aspect_ratio *= (float)pic_width / (float)pic_height;

    /* XXX: we suppose the screen has a 1.0 pixel ratio */
    height = scr_height;
    width = lrint(height * aspect_ratio) & ~1;
    if (width > scr_width) {
        width = scr_width;
        height = lrint(width / aspect_ratio) & ~1;
    }
    x = (scr_width - width) / 2;
    y = (scr_height - height) / 2;
    rect->x = scr_xleft + x;
    rect->y = scr_ytop  + y;
    rect->w = FFMAX(width,  1);
    rect->h = FFMAX(height, 1);
}

static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
{
    int ret = 0;
    switch (frame->format) {
        case AV_PIX_FMT_YUV420P:
            if (frame->linesize[0] < 0 || frame->linesize[1] < 0 || frame->linesize[2] < 0) {
                av_log(NULL, AV_LOG_ERROR, "Negative linesize is not supported for YUV.\n");
                return -1;
            }
            ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0],
                                                  frame->data[1], frame->linesize[1],
                                                  frame->data[2], frame->linesize[2]);
            break;
        case AV_PIX_FMT_BGRA:
            if (frame->linesize[0] < 0) {
                ret = SDL_UpdateTexture(tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
            } else {
                ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
            }
            break;
        default:
            /* This should only happen if we are not using avfilter... */
            *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
                frame->width, frame->height, (enum AVPixelFormat)frame->format,
                frame->width, frame->height, AV_PIX_FMT_BGRA,
                sws_flags, NULL, NULL, NULL);
            if (*img_convert_ctx != NULL) {
                uint8_t *pixels[4];
                int pitch[4];
                if (!SDL_LockTexture(tex, NULL, (void **)pixels, pitch)) {
                    sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
                              0, frame->height, pixels, pitch);
                    SDL_UnlockTexture(tex);
                }
            } else {
                av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
                ret = -1;
            }
            break;
    }
    return ret;
}

static void video_image_display(VideoState *is)
{
    Frame *vp;
    Frame *sp = NULL;
    SDL_Rect rect;

    vp = frame_queue_peek_last(&is->pictq);
    if (is->subtitle_st) {
        if (frame_queue_nb_remaining(&is->subpq) > 0) {
            sp = frame_queue_peek(&is->subpq);

            if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                if (!sp->uploaded) {
                    uint8_t* pixels[4];
                    int pitch[4];
                    int i;
                    if (!sp->width || !sp->height) {
                        sp->width = vp->width;
                        sp->height = vp->height;
                    }
                    if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
                        return;

                    for (i = 0; i < sp->sub.num_rects; i++) {
                        AVSubtitleRect *sub_rect = sp->sub.rects[i];

                        sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
                        sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
                        sub_rect->w = av_clip(sub_rect->w, 0, sp->width  - sub_rect->x);
                        sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);

                        is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
                            sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
                            sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
                            0