发布于 2019-01-24
616人围观 0条评论
前言
本文参考雷霄骅的博文《最简单的基于FFMPEG+SDL的视频播放器:拆分-解码器和播放器》,使用 C++ 根据 FFmpeg 4.1 版本改写(原文代码基于旧版本 FFmpeg)。代码见下文。
本文代码地址见https://github.com/2997215859/ffplay-learn/blob/master/Video/decode2yuv.cpp
本文代码基于ffmpeg-4.1版本,事先需要安装好ffmpeg
本文代码提供CMakeLists.txt,见附录CMakeLists.txt部分,或根据CMakeLists.txt改写。需要链接的库如下(基本上安装ffmpeg、ffplay、SDL2之后就有了)。
avdevice avfilter avformat avcodec swscale swresample postproc avutil m xcb xcb-shm xcb xcb-shape xcb xcb-xfixes xcb-render xcb-shape xcb asound pthread m fontconfig freetype freetype z bz2 lzma SDL2 SDL2main
代码注解
#include <iostream>
#include <cstdio>
#include <cstdlib>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
using namespace std;
int main () {
string filepath = "/home/sensetime/videos/big_buck_bunny_720p_30mb.mp4";
string outputFile = "/home/sensetime/videos/output.yuv";
// initialization,初始化
avdevice_register_all();
avformat_network_init();
AVFormatContext *avFormatContext = avformat_alloc_context();
if (avformat_open_input(&avFormatContext, filepath.c_str(), NULL, NULL) != 0) { // 打开文件,生成format协议上下文
cerr << "Failed to open input stream.\n";
return -1;
}
if (avformat_find_stream_info(avFormatContext, NULL) < 0) { // 查看文件格式信息
cerr << "Failed to find stream information\n";
return -1;
}
// 定位第一个视频流(video stream)
int videoIndex = -1;
for (int i=0;i<avFormatContext->nb_streams;i++) {
if(avFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
videoIndex = i;
break;
}
}
if (videoIndex == -1) {
cerr << "Failed to find a video stream.\n";
return -1;
}
AVStream *avStream = avFormatContext->streams[videoIndex]; // 获取该流指针
AVCodec *avCodec = avcodec_find_decoder(avStream->codecpar->codec_id); // 获取该流的解码器
if (!avCodec) {
cerr << "Failed to find decoder for stream.\n";
return -1;
}
AVCodecContext *avCodecContext = avcodec_alloc_context3(avCodec); // 为该解码器新建解码上下文
if (!avCodecContext) {
cerr << "Failed to allocate the decoder context for stream\n";
return -1;
}
if(avcodec_parameters_to_context(avCodecContext, avStream->codecpar) < 0) { // 将流的解码参数拷贝到解码上下文
cerr << "Failed to copy decoder parameters to input decoder context.\n";
return -1;
}
if (avcodec_open2(avCodecContext, avCodec, NULL) < 0) { // 打开解码器
cerr << "Failed to open codec.\n";
return -1;
}
AVFrame *yuvFrame = av_frame_alloc();
// 根据输入的解码上下文获取输入流的宽高,基于宽高和图片每个像素的存储方式,分配每个图片的存储空间
unsigned char* outBuffer = (unsigned char*) av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, avCodecContext->width, avCodecContext->height, 1));
av_image_fill_arrays(yuvFrame->data, yuvFrame->linesize, outBuffer, AV_PIX_FMT_YUV420P, avCodecContext->width, avCodecContext->height, 1);
// Output Info ----
cout << "--------------- File Information ---------------\n";
av_dump_format(avFormatContext, 0, filepath.c_str(), 0);
cout << "-----------------------------------------------\n";
// 创建图像转换上下文
SwsContext *imgConvertContext = sws_getContext(avCodecContext->width, avCodecContext->height, avCodecContext->pix_fmt, avCodecContext->width, avCodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
AVPacket *packet = (AVPacket*)av_malloc(sizeof(AVPacket));
AVFrame *frame = av_frame_alloc();
FILE *fpYUV = fopen(outputFile.c_str(), "wb+");
while (av_read_frame(avFormatContext, packet) >= 0) { // 从format上下文读取压缩编码的packet
if (packet->stream_index == videoIndex) {
if (avcodec_send_packet(avCodecContext, packet) < 0) { // 将packet提交到解码上下文,进行解码
cerr << "Failed to submitting the packet to the decoder\n";
return -1;
}
// 从解码上下文中读取所有输出的frames
int ret;
while ((ret = avcodec_receive_frame(avCodecContext, frame)) >= 0) {
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF || ret < 0) {
cerr << "Error during decoding\n";
return -1;
}
// sws_scaled的作用是将frame作像素转化和图像拉伸等等,这里是将数据转换到新的yuvFrame
sws_scale(imgConvertContext, frame->data, frame->linesize, 0, avCodecContext->height, yuvFrame->data, yuvFrame->linesize);
int ySize = avCodecContext->width * avCodecContext->height;
// 根据YUV420的特点,将每帧的数据以此按照Y、U、V三层保存到输出文件中
fwrite(yuvFrame->data[0], 1, ySize, fpYUV); // Y
fwrite(yuvFrame->data[1], 1, ySize/4, fpYUV); // U
fwrite(yuvFrame->data[2], 1, ySize/4, fpYUV); // V
cerr << "Succeed to decode 1 frame!\n";
}
}
av_packet_unref(packet);
}
sws_freeContext(imgConvertContext);
fclose(fpYUV);
av_frame_free(&yuvFrame);
av_frame_free(&frame);
avcodec_close(avCodecContext);
avformat_close_input(&avFormatContext);
return 0;
}附录
CMakeLists.txt
cmake_minimum_required(VERSION 3.13)
project(player)
set(CMAKE_CXX_STANDARD 11)
include_directories(.)
include_directories(/usr/include/.)
include_directories(/usr/local/include/.)
#link_directories(/usr/lib/)
link_directories(/usr/local/lib/)
# Place built executables under <source-dir>/bin
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/bin")
# Build type: "release" by default; set to "debug" for a debug build.
# (FIX: the original comment claimed a debug build was being produced.)
SET(CMAKE_BUILD_TYPE "release")
if (CMAKE_BUILD_TYPE STREQUAL debug)
    add_definitions(-D_DEBUG)
endif ()
SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb -std=c++11")
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall -std=c++11")
add_executable(decode2yuv decode2yuv.cpp)
# FFmpeg libraries plus their transitive system dependencies.
# (FIX: removed the duplicated entries — xcb x4, xcb-shape x2, m x2,
# freetype x2 — from the original flat list; the set is unchanged.)
target_link_libraries(decode2yuv
        avdevice
        avfilter
        avformat
        avcodec
        swscale
        swresample
        postproc
        avutil
        # avresample
        xcb xcb-shm xcb-shape xcb-xfixes xcb-render
        asound
        pthread m
        fontconfig freetype
        z
        bz2
        lzma
        SDL2
        SDL2main)
没有帐号?立即注册