├── .gitignore ├── README.md ├── ffmpeg-video-avfilter ├── CMakeLists.txt ├── README.md └── src │ ├── decoder.cpp │ ├── decoder.h │ ├── main.cpp │ └── time_util.h ├── ffmpeg-video-avstream ├── CMakeLists.txt ├── README.md └── src │ ├── decoder.cpp │ ├── decoder.h │ ├── main.cpp │ └── time_util.h ├── ffmpeg-video-decoder ├── CMakeLists.txt ├── README.md └── src │ ├── decoder.cpp │ ├── decoder.h │ └── main.cpp ├── ffmpeg-video-image-push ├── CMakeLists.txt ├── README.md └── src │ ├── decoder.cpp │ ├── decoder.h │ └── main.cpp ├── ffmpeg-video-overlay ├── CMakeLists.txt ├── README.md └── src │ ├── decoder.cpp │ ├── decoder.h │ ├── main.cpp │ ├── time_util.h │ └── trans_image.h └── ffmpeg-video-transcoder ├── CMakeLists.txt ├── README.md └── src ├── decoder.cpp ├── decoder.h ├── main.cpp └── time_util.h /.gitignore: -------------------------------------------------------------------------------- 1 | ### IntelliJ IDEA ### 2 | .idea 3 | .DS_Store 4 | *.iws 5 | *.iml 6 | *.ipr 7 | 8 | ### Project ### 9 | build 10 | third 11 | cmake-build-debug 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ffmpeg-cpp-video-processes 2 | 3 | ffmpeg-video-decoder 该模块是将视频解码成jpg图片,主要依赖ffmpeg、opencv这两个第三方库。如果视频资源地址是https,则还需要ssl库。 4 | 5 | ffmpeg-video-avfilter 该模块是将视频解码成jpg图片,加上avfilter实现的动态文字水印能力。 6 | 7 | ffmpeg-video-overlay 该模块是将视频解码成jpg图片,同时加上文字水印和图片水印的能力。 8 | 9 | ffmpeg-video-transcoder 该模块是将拉到的视频流直接转码推流到rtsp服务。 10 | 11 | ffmpeg-video-avstream 该模块是将视频流解码、avfilter、编码、推流到rtsp服务。 12 | 13 | ffmpeg-video-image-push 该模块与其他不同是,解码的是图片集合,而不是视频流,将图片解码后,编码成h264后再推流。 14 | 15 | 16 | 参考资料 17 | 18 | 雷神github:https://github.com/leixiaohua1020 19 | 20 | ffmpeg官方例子:https://ffmpeg.org/doxygen/3.4/examples.html 21 | -------------------------------------------------------------------------------- /ffmpeg-video-avfilter/CMakeLists.txt: 
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(ffmpeg-video-avfilter) 4 | 5 | set(TARGET avfilter_decoder) 6 | set(CMAKE_BUILD_TYPE RELEASE) 7 | 8 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -fPIC -std=c++11") 9 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -fPIC") 10 | 11 | include_directories(third/ffmpeg/include) 12 | include_directories(third/opencv/include) 13 | link_directories(${CMAKE_SOURCE_DIR}/third/ffmpeg/lib) 14 | link_directories(${CMAKE_SOURCE_DIR}/third/opencv/lib) 15 | 16 | set(OPENCV_LIBS opencv_imgproc opencv_highgui opencv_contrib opencv_core) 17 | set(FFMPEG_LIBS avformat avfilter avcodec avutil postproc swresample swscale vpx x264 x265 fdk-aac mp3lame opus) 18 | set(SYSTEM_LIBS ssl crypto pthread z bz2 m dl freetype) 19 | 20 | file(GLOB_RECURSE sources src/*.[ch]pp) 21 | list(REMOVE_ITEM sources ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 22 | 23 | #编译成可执行文件 24 | add_executable(${TARGET} ${sources} ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 25 | 26 | #添加链接库 27 | target_link_libraries(${TARGET} PUBLIC ${OPENCV_LIBS} ${FFMPEG_LIBS} ${SYSTEM_LIBS}) 28 | -------------------------------------------------------------------------------- /ffmpeg-video-avfilter/README.md: -------------------------------------------------------------------------------- 1 | 该模块的能力是将视频流解码成图片的基础上加上了ffmpeg filter的能力。 2 | 3 | 运行时,输入解码数量和视频资源地址两个参数即可。对了,想要测试直播流地址的话。可以找下热门的直播网站,在直播间通过浏览器F12开发者工具抓取直播视频流地址即可,快捷方便。 4 | 5 | 编译和运行方式,依赖ffmpeg、opencv两个第三方库,需要自行编译和安装到third目录。 6 | 7 | cd ffmpeg-video-avfilter 8 | mkdir build 9 | mkdir third (depend on ffmpeg & opencv) 10 | cd build 11 | cmake ../ 12 | make 13 | ./avfilter_decoder 10 http://.... 
14 | 15 | -------------------------------------------------------------------------------- /ffmpeg-video-avfilter/src/decoder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "decoder.h" 4 | #include "time_util.h" 5 | 6 | using namespace std; 7 | 8 | void Decoder::decode(int num, std::string source) { 9 | 10 | decodeId++; 11 | decodeCount = num; 12 | 13 | int ret; 14 | if ((ret = initDecode(source)) < 0) { 15 | cout << "init decode fail :" << ret << endl; 16 | return; 17 | } 18 | 19 | if ((ret = initFilter()) < 0) { 20 | cout << "init filter fail :" << ret << endl; 21 | return; 22 | } 23 | 24 | if ((ret = decoding()) < 0) { 25 | cout << "decoding fail :" << ret << endl; 26 | return; 27 | } 28 | 29 | closeDecode(); 30 | } 31 | 32 | int Decoder::initDecode(std::string source) { 33 | 34 | av_register_all(); 35 | 36 | int ret; 37 | if ((ret = avformat_network_init()) != 0) { 38 | cout << "avformat_network_init failed, ret: " << ret << endl; 39 | return ret; 40 | } 41 | 42 | ret = avformat_open_input(&pFormatCtx, source.c_str(), nullptr, &pAvDict); 43 | if (ret != 0) { 44 | cout << "avformat_open_input failed, ret: " << ret << endl; 45 | return ret; 46 | } 47 | 48 | ret = avformat_find_stream_info(pFormatCtx, nullptr); 49 | if (ret < 0) { 50 | cout << "avformat_find_stream_info failed, ret: " << ret << endl; 51 | return ret; 52 | } 53 | 54 | if ((ret = findVideoStreamIndex()) < 0) { 55 | cout << "findVideoStreamIndex failed, ret: " << ret << endl; 56 | return ret; 57 | } 58 | 59 | // Get a pointer to the codec context for the video stream 60 | pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec; 61 | 62 | // Find the decoder for the video stream 63 | AVCodec *pCodec = nullptr; 64 | pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id); 65 | if(pCodec == NULL) { 66 | cout << "Unsupported codec!" 
<< endl; 67 | return Constat::system_error; // Codec not found 68 | } 69 | 70 | // Copy context 71 | pCodecCtx = avcodec_alloc_context3(pCodec); 72 | if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { 73 | cout << "Couldn't copy codec context!" << ret << endl; 74 | return Constat::system_error; 75 | } 76 | 77 | // Open codec 78 | if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { 79 | cout << "Could not open codec!" << ret << endl; 80 | return Constat::system_error; 81 | } 82 | 83 | if(pCodecCtx->framerate.den > 0) { 84 | int fps = (int)(pCodecCtx->framerate.num / pCodecCtx->framerate.den); 85 | if(decodeFps == 0) { 86 | decodeFps = fps; 87 | } 88 | skip = fps / decodeFps; 89 | } 90 | 91 | // Allocate video frame 92 | pFrame=av_frame_alloc(); 93 | 94 | // Allocate an AVFrame structure 95 | pFrameRGB=av_frame_alloc(); 96 | 97 | // Allocate an AVFrame structure 98 | pFrameOut=av_frame_alloc(); 99 | 100 | // Determine required buffer size and allocate buffer 101 | imgWidth = pCodecCtx->width; 102 | imgHeight = pCodecCtx->height; 103 | imgSize = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 104 | buffer = (uint8_t *)av_malloc(imgSize * sizeof(uint8_t)); 105 | 106 | // Assign appropriate parts of buffer to image planes in pFrameRGB 107 | // Note that pFrameRGB is an AVFrame, but AVFrame is a superset 108 | // of AVPicture 109 | avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, imgWidth, imgHeight); 110 | 111 | // initialize SWS context for software scaling 112 | sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, 113 | pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 114 | AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr); 115 | 116 | cout << "open stream succ, source: " << source << ", find video stream idx: " << videoStream 117 | << ", width: " << imgWidth << ", height: " << imgHeight << ", size: " << imgSize << ", skip: " << skip 118 | << ", decodeCount: " << decodeCount << endl; 119 | 
return Constat::ok; 120 | } 121 | 122 | int Decoder::initFilter() { 123 | 124 | avfilter_register_all(); 125 | 126 | char args[512]; 127 | AVFilter *buffersrc = avfilter_get_by_name("buffer"); 128 | AVFilter *buffersink = avfilter_get_by_name("buffersink"); 129 | AVFilterInOut *outputs = avfilter_inout_alloc(); 130 | AVFilterInOut *inputs = avfilter_inout_alloc(); 131 | enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; 132 | 133 | /* buffer video source: the decoded frames from the decoder will be inserted here. */ 134 | filterGraph = avfilter_graph_alloc(); 135 | snprintf(args, sizeof(args), 136 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 137 | pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 138 | pCodecCtx->time_base.num, pCodecCtx->time_base.den, 139 | pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den); 140 | 141 | int ret = avfilter_graph_create_filter(&buffersrcCtx, buffersrc, "in", args, NULL, filterGraph); 142 | if (ret < 0) { 143 | cout << "Cannot create buffer source!" << endl; 144 | return ret; 145 | } 146 | 147 | /* buffer video sink: to terminate the filter chain. */ 148 | AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); 149 | buffersink_params->pixel_fmts = pix_fmts; 150 | ret = avfilter_graph_create_filter(&buffersinkCtx, buffersink, "out", NULL, buffersink_params, filterGraph); 151 | av_free(buffersink_params); 152 | if (ret < 0) { 153 | cout << "Cannot create buffer sink!" << endl; 154 | return ret; 155 | } 156 | 157 | /* Endpoints for the filter graph. 
*/ 158 | outputs->name = av_strdup("in"); 159 | outputs->filter_ctx = buffersrcCtx; 160 | outputs->pad_idx = 0; 161 | outputs->next = NULL; 162 | 163 | inputs->name = av_strdup("out"); 164 | inputs->filter_ctx = buffersinkCtx; 165 | inputs->pad_idx = 0; 166 | inputs->next = NULL; 167 | 168 | if ((ret = avfilter_graph_parse_ptr(filterGraph, filtersDesc, &inputs, &outputs, NULL)) < 0){ 169 | cout << "avfilter_graph_parse_ptr failed, ret: " << ret << endl; 170 | return ret; 171 | } 172 | 173 | if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0) { 174 | cout << "avfilter_graph_config failed, ret: " << ret << endl; 175 | return ret; 176 | } 177 | 178 | filterCtx = filterGraph->filters[2]; 179 | return Constat::ok; 180 | } 181 | 182 | int Decoder::findVideoStreamIndex() { 183 | for (size_t i = 0; i < pFormatCtx->nb_streams; i++) { 184 | if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 185 | videoStream = i; 186 | return 0; 187 | } 188 | } 189 | return Constat::system_error; 190 | } 191 | 192 | 193 | int Decoder::decoding() { 194 | 195 | AVPacket packet; 196 | av_init_packet(&packet); 197 | int got_frame = 0; 198 | 199 | while(1) { 200 | 201 | int ret = av_read_frame(pFormatCtx, &packet); 202 | if (ret != 0) { 203 | if (ret == AVERROR_EOF) { 204 | cout << "av_read_frame failed, ret: " << ret << endl; 205 | } else { 206 | cout << "av_read_frame ret eof!" << endl; 207 | } 208 | break; 209 | } 210 | 211 | if (packet.stream_index != videoStream) { 212 | av_packet_unref(&packet); 213 | continue; 214 | } 215 | 216 | if ((ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_frame, &packet)) < 0) { 217 | cout << "avcodec_decode_video2 ret: " << ret << endl; 218 | av_packet_unref(&packet); 219 | continue; 220 | } 221 | 222 | pts = packet.pts; 223 | if (got_frame > 0) { 224 | 225 | if(decodeCount == 0) { 226 | cout << "decode finish!!!" 
<< endl; 227 | break; 228 | } 229 | 230 | if(skip == 0 || decodeFrameNum % skip == 0) { 231 | 232 | //avfilter 233 | pFrame->pts = av_frame_get_best_effort_timestamp(pFrame); 234 | 235 | //av_opt_set 236 | stringstream ss; 237 | ss << "avfilter - " << getFormatTime(); 238 | av_opt_set(filterCtx->priv, "text", ss.str().c_str(), 0); 239 | 240 | /* push the decoded frame into the filtergraph */ 241 | if (av_buffersrc_add_frame(buffersrcCtx, pFrame) < 0) { 242 | cout << "Error while feeding the filtergraph!" << endl; 243 | break; 244 | } 245 | 246 | /* pull filtered pictures from the filtergraph */ 247 | if (av_buffersink_get_frame(buffersinkCtx, pFrameOut) < 0) { 248 | cout << "Error get filter frame the filtergraph!" << endl; 249 | break; 250 | } 251 | 252 | decodeCount--; 253 | sws_scale(sws_ctx, (const uint8_t *const *)pFrameOut->data, 254 | pFrameOut->linesize, 0, imgHeight, pFrameRGB->data, pFrameRGB->linesize); 255 | //saveImage(getImageName()); 256 | 257 | av_frame_unref(pFrame); 258 | av_frame_unref(pFrameOut); 259 | } 260 | 261 | decodeFrameNum++; 262 | cout << "time: " << getFormatTime() << " decodeFrameNum: " << decodeFrameNum << endl; 263 | } 264 | 265 | av_packet_unref(&packet); 266 | } 267 | return Constat::ok; 268 | } 269 | 270 | void Decoder::saveImage(std::string filename) { 271 | cv::Mat mat; 272 | mat.create(cv::Size(imgWidth, imgHeight), CV_8UC3); 273 | mat.data = buffer; 274 | if (!cv::imwrite(filename, mat)) { 275 | cout<< "saveImage failed, filename: " << filename << ", pts: " << pts << endl; 276 | } else { 277 | cout<< "saveImage success, filename: " << filename << ", pts: " << pts << endl; 278 | } 279 | } 280 | 281 | std::string Decoder::getImageName() { 282 | time_t ts = time(NULL); 283 | std::stringstream ss; 284 | ss << "/tmp/"; 285 | ss << decodeId; 286 | ss << "_"; 287 | ss << ts; 288 | ss << "_"; 289 | ss << decodeFrameNum; 290 | ss << "."; 291 | ss << "jpg"; 292 | return ss.str(); 293 | } 294 | 295 | void Decoder::closeDecode() { 296 | 
297 | avfilter_graph_free(&filterGraph); 298 | 299 | av_frame_free(&pFrameOut); 300 | av_frame_free(&pFrameRGB); 301 | av_frame_free(&pFrame); 302 | 303 | avcodec_close(pCodecCtx); 304 | avcodec_free_context(&pCodecCtx); 305 | 306 | avcodec_close(pCodecCtxOrig); 307 | //avcodec_free_context(&pCodecCtxOrig); //Mustn't free 308 | 309 | //avfilter_free(filterCtx); 310 | 311 | avformat_close_input(&pFormatCtx); 312 | av_dict_free(&pAvDict); 313 | } -------------------------------------------------------------------------------- /ffmpeg-video-avfilter/src/decoder.h: -------------------------------------------------------------------------------- 1 | #ifndef HELLO_DECODE_DECODE_H 2 | #define HELLO_DECODE_DECODE_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #ifdef __cplusplus 18 | } 19 | #endif 20 | 21 | #include 22 | #include 23 | 24 | class Decoder { 25 | 26 | public: 27 | void decode(int, std::string); 28 | 29 | private: 30 | int initDecode(std::string); 31 | int initFilter(); 32 | int findVideoStreamIndex(); 33 | int decoding(); 34 | std::string getImageName(); 35 | void saveImage(std::string); 36 | void closeDecode(); 37 | 38 | private: 39 | 40 | int videoStream = -1; 41 | AVDictionary *pAvDict = nullptr; 42 | AVFormatContext *pFormatCtx = nullptr; 43 | AVCodecContext *pCodecCtxOrig = nullptr; 44 | AVCodecContext *pCodecCtx = nullptr; 45 | SwsContext *sws_ctx = nullptr; 46 | 47 | AVFrame *pFrame = nullptr; 48 | AVFrame *pFrameRGB = nullptr; 49 | AVFrame *pFrameOut = nullptr; 50 | uint8_t *buffer = nullptr; 51 | 52 | int imgWidth = 0; 53 | int imgHeight = 0; 54 | int imgSize = 0; 55 | int decodeFps = 1; 56 | int skip = 0; 57 | int pts = 0; 58 | 59 | int decodeId = 0; 60 | int decodeCount = 0; 61 | int decodeFrameNum = 0; 62 | 63 | AVFilterGraph *filterGraph; 64 | AVFilterContext *buffersrcCtx; 65 | AVFilterContext 
/**
 * Format the current local time as "YYYY-MM-DD HH:MM:SS".
 *
 * @return the formatted timestamp (19 characters).
 */
std::string getFormatTime()
{
    char buf[64] = {0};
    time_t now_time = time(NULL);
    // BUG FIX: the original passed 128 as the buffer size for a 64-byte
    // buffer, letting strftime write past the end of `buf` for long
    // formats/locales. Pass the real size instead.
    strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&now_time));
    return buf;
}
| include_directories(third/ffmpeg/include) 12 | include_directories(third/opencv/include) 13 | link_directories(${CMAKE_SOURCE_DIR}/third/ffmpeg/lib) 14 | link_directories(${CMAKE_SOURCE_DIR}/third/opencv/lib) 15 | 16 | set(OPENCV_LIBS opencv_imgproc opencv_highgui opencv_contrib opencv_core) 17 | set(FFMPEG_LIBS avformat avfilter avcodec avutil postproc swresample swscale vpx x264 x265 fdk-aac mp3lame opus) 18 | set(SYSTEM_LIBS ssl crypto pthread z bz2 m dl freetype) 19 | 20 | file(GLOB_RECURSE sources src/*.[ch]pp) 21 | list(REMOVE_ITEM sources ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 22 | 23 | #编译成可执行文件 24 | add_executable(${TARGET} ${sources} ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 25 | 26 | #添加链接库 27 | target_link_libraries(${TARGET} PUBLIC ${OPENCV_LIBS} ${FFMPEG_LIBS} ${SYSTEM_LIBS}) 28 | -------------------------------------------------------------------------------- /ffmpeg-video-avstream/README.md: -------------------------------------------------------------------------------- 1 | 该模块的能力是将视频流解码,编码,推流。 2 | 3 | 运行时,输入解码数量和视频资源地址两个参数即可。对了,想要测试直播流地址的话。可以找下热门的直播网站,在直播间通过浏览器F12开发者工具抓取直播视频流地址即可,快捷方便。 4 | 5 | 编译和运行方式,依赖ffmpeg、opencv两个第三方库,需要自行编译和安装到third目录。 6 | 7 | cd ffmpeg-video-avstream 8 | mkdir build 9 | mkdir third (depend on ffmpeg & opencv) 10 | cd build 11 | cmake ../ 12 | make 13 | ./video_avstream http://.... rtsp://.... 
14 | 15 | -------------------------------------------------------------------------------- /ffmpeg-video-avstream/src/decoder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "decoder.h" 4 | #include "time_util.h" 5 | 6 | using namespace std; 7 | 8 | void Decoder::decode(std::string source, std::string desc) { 9 | 10 | int ret; 11 | if ((ret = initDecode(source)) < 0) { 12 | cout << "init decode fail :" << ret << endl; 13 | return; 14 | } 15 | 16 | if ((ret = initEncode()) < 0) { 17 | cout << "init encode fail :" << ret << endl; 18 | return; 19 | } 20 | 21 | if ((ret = initFilter()) < 0) { 22 | cout << "init filter fail :" << ret << endl; 23 | return; 24 | } 25 | 26 | if ((ret = initPusher(desc)) < 0) { 27 | cout << "init pusher fail :" << ret << endl; 28 | return; 29 | } 30 | 31 | 32 | if ((ret = decoding()) < 0) { 33 | cout << "decoding fail :" << ret << endl; 34 | return; 35 | } 36 | 37 | closeDecode(); 38 | } 39 | 40 | int Decoder::initDecode(std::string source) { 41 | 42 | av_register_all(); 43 | 44 | int ret; 45 | if ((ret = avformat_network_init()) != 0) { 46 | cout << "avformat_network_init failed, ret: " << ret << endl; 47 | return ret; 48 | } 49 | 50 | //打开视频文件:读取文件头、文件格式等信息存储到AVFormatContext。 51 | ret = avformat_open_input(&pFormatCtx, source.c_str(), nullptr, &pAvDict); 52 | if (ret != 0) { 53 | cout << "avformat_open_input failed, ret: " << ret << endl; 54 | return ret; 55 | } 56 | 57 | //搜索流信息:读取一段视频文件数据,尝试解码,将取到的流信息存储到AVFormatContext。nb_streams(流的数量)和streams(流的指针数据)。 58 | ret = avformat_find_stream_info(pFormatCtx, nullptr); 59 | if (ret < 0) { 60 | cout << "avformat_find_stream_info failed, ret: " << ret << endl; 61 | return ret; 62 | } 63 | 64 | //查找视频流索引编号 65 | if ((ret = findVideoStreamIndex()) < 0) { 66 | cout << "findVideoStreamIndex failed, ret: " << ret << endl; 67 | return ret; 68 | } 69 | 70 | // Get a pointer to the codec context for the video stream 71 | 
pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec; 72 | 73 | // codec_id 是编解码器的id 74 | // 根据编解码器id 获取解码器 AVCodec 75 | // Find the decoder for the video stream 76 | AVCodec *pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id); 77 | if(pCodec == NULL) { 78 | cout << "Unsupported codec!" << endl; 79 | return Constat::system_error; // Codec not found 80 | } 81 | 82 | //分配AVCodecContext并初始化默认值 83 | pCodecCtx = avcodec_alloc_context3(pCodec); 84 | //将源AVCodecContext的设置复制到目标AVCodecContext中。 85 | //得到的"目标编解码器上下文"将是未打开的,必须调用avcodec_open2()才能打开它;才能用它编解码音视频。 86 | if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { 87 | cout << "Couldn't copy codec context!" << ret << endl; 88 | return Constat::system_error; 89 | } 90 | 91 | // Open codec 92 | //初始化AVCodecContext以使用给定的AVCodec。 93 | if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { 94 | cout << "Could not open codec!" << ret << endl; 95 | return Constat::system_error; 96 | } 97 | 98 | //framerate帧速率 分子/分母 分开存储,相除才是最终值fps 99 | //cout << "framerate.num: " << pCodecCtx->framerate.num << endl; 100 | //cout << "framerate.den: " << pCodecCtx->framerate.den << endl; 101 | //cout << "fps: " << (int)(pCodecCtx->framerate.num / pCodecCtx->framerate.den) << endl; 102 | //cout << "pix_fmt is AV_PIX_FMT_YUV420P : " << (pCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P) << endl; 103 | 104 | //可以通过设定解码速率来控制解码跳帧 105 | if(pCodecCtx->framerate.den > 0) { 106 | decodeFps = (int)(pCodecCtx->framerate.num / pCodecCtx->framerate.den); 107 | if(encodeFps == 0) { 108 | encodeFps = decodeFps; 109 | } 110 | skip = decodeFps / encodeFps; 111 | } 112 | 113 | //创建AVFrame来存放从AVPacket中解码出来的原始数据。但是av_frame_alloc并不会分配数据的缓存空间。 114 | pFrame = av_frame_alloc(); 115 | 116 | //解码得到原始的分辨率 117 | imgWidth = pCodecCtx->width; 118 | imgHeight = pCodecCtx->height; 119 | //imgSize = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 120 | 121 | cout << "open stream succ, source: " << source << ", find video stream idx: " << videoStream 122 | 
<< ", decodeFps: " << decodeFps << ", encodeFps: " << encodeFps << ", skip: " << skip 123 | << ", width: " << imgWidth << ", height: " << imgHeight << endl; 124 | return Constat::ok; 125 | } 126 | 127 | int Decoder::initEncode() { 128 | 129 | //获取h264编码器 130 | h264Codec = avcodec_find_encoder(AV_CODEC_ID_H264); 131 | if (!h264Codec){ 132 | cout << "avcodec_find_encoder failed" << endl; 133 | return Constat::system_error; 134 | } 135 | 136 | //创建编解码器的context,context可以用来设置编解码过程所需要的各种配置信息 137 | h264CodecCtx = avcodec_alloc_context3(h264Codec); 138 | if (!h264CodecCtx){ 139 | cout << "avcodec_find_encoder failed" << endl; 140 | return Constat::system_error; 141 | } 142 | 143 | //h264CodecCtx->bit_rate = 400000; //码率。这里设置固定码率应该没啥用。 144 | h264CodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; //编码的原始数据格式 145 | h264CodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; //指定为视频编码 146 | h264CodecCtx->width = imgWidth; //分辨率宽 147 | h264CodecCtx->height = imgHeight; //分辨率高 148 | h264CodecCtx->channels = 0; //音频通道数 149 | h264CodecCtx->time_base = {1, encodeFps}; //时间基,表示每个时间刻度是多少秒。 150 | h264CodecCtx->framerate = {encodeFps, 1}; //帧率,没秒 151 | h264CodecCtx->gop_size = 2 * encodeFps; //图像组两个关键帧(I帧)的距离,也就是一组帧的数量 原来是10 152 | h264CodecCtx->max_b_frames = 5; //指定B帧数量,B帧是双向参考帧,填充更多B帧,压缩率更高但是延迟也高 153 | h264CodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; //暂时不明 154 | 155 | 156 | //av_opt_set(h264CodecCtx->priv_data, "preset", "slow", 0); //慢速压缩编码,慢的可以保证视频质量 157 | //av_opt_set(h264CodecCtx->priv_data, "preset", "veryfast", 0); 158 | av_opt_set(h264CodecCtx->priv_data, "preset", "ultrafast", 0); //快速编码,但会损失质量 159 | //av_opt_set(h264CodecCtx->priv_data, "tune", "zerolatency", 0); //适用于快速编码和低延迟流式传输,但是会出现绿屏 160 | 161 | //打开编码器 162 | if (avcodec_open2(h264CodecCtx, h264Codec, NULL) < 0){ 163 | cout << "fail: avcodec_open2" << endl; 164 | return false; 165 | } 166 | 167 | h264Frame = av_frame_alloc(); 168 | h264ImgSize = avpicture_get_size(h264CodecCtx->pix_fmt, h264CodecCtx->width, h264CodecCtx->height); 169 | 
h264Buffer = (uint8_t *)av_malloc(h264ImgSize * sizeof(uint8_t)); 170 | avpicture_fill((AVPicture *)h264Frame, h264Buffer, h264CodecCtx->pix_fmt, h264CodecCtx->width, h264CodecCtx->height); 171 | 172 | cout << "init encode succ !!!" << endl; 173 | return Constat::ok; 174 | } 175 | 176 | int Decoder::initFilter() { 177 | 178 | //注册所有过滤器 179 | avfilter_register_all(); 180 | 181 | //获取输入输出过滤器AVFilter的实例 182 | AVFilter *buffersrc = avfilter_get_by_name("buffer"); 183 | AVFilter *buffersink = avfilter_get_by_name("buffersink"); 184 | AVFilterInOut *outputs = avfilter_inout_alloc(); //TODO: Must be freed with avfilter_inout_free() 185 | AVFilterInOut *inputs = avfilter_inout_alloc(); //TODO: Must be freed with avfilter_inout_free() 186 | enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; 187 | 188 | 189 | //avfilter_graph_create_filter初始化输入的AVFilterContext即AVFilter的上下文。 190 | int ret; 191 | char args[512]; 192 | snprintf(args, sizeof(args), 193 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 194 | pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 195 | pCodecCtx->time_base.num, pCodecCtx->time_base.den, 196 | pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den); 197 | filterGraph = avfilter_graph_alloc(); 198 | ret = avfilter_graph_create_filter(&buffersrcCtx, buffersrc, "in", args, NULL, filterGraph); 199 | if (ret < 0) { 200 | cout << "avfilter_graph_create_filter buffersrc failed:" << ret << endl; 201 | return ret; 202 | } 203 | 204 | //avfilter_graph_create_filter初始化输出的AVFilterContext即AVFilter的上下文。 205 | AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); 206 | buffersink_params->pixel_fmts = pix_fmts; 207 | ret = avfilter_graph_create_filter(&buffersinkCtx, buffersink, "out", NULL, buffersink_params, filterGraph); 208 | av_free(buffersink_params); 209 | if (ret < 0) { 210 | cout << "avfilter_graph_create_filter buffersink failed:" << ret << endl; 211 | return ret; 212 | } 213 | 214 
| /* Endpoints for the filter graph. */ 215 | outputs->name = av_strdup("in"); 216 | outputs->filter_ctx = buffersrcCtx; 217 | outputs->pad_idx = 0; 218 | outputs->next = NULL; 219 | 220 | inputs->name = av_strdup("out"); 221 | inputs->filter_ctx = buffersinkCtx; 222 | inputs->pad_idx = 0; 223 | inputs->next = NULL; 224 | 225 | if ((ret = avfilter_graph_parse_ptr(filterGraph, filtersDesc, &inputs, &outputs, NULL)) < 0){ 226 | cout << "avfilter_graph_parse_ptr failed, ret: " << ret << endl; 227 | return ret; 228 | } 229 | 230 | if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0) { 231 | cout << "avfilter_graph_config failed, ret: " << ret << endl; 232 | return ret; 233 | } 234 | 235 | filterCtx = filterGraph->filters[2]; 236 | 237 | cout << "init filter succ !!!" << endl; 238 | return Constat::ok; 239 | } 240 | 241 | int Decoder::initPusher(std::string desc) { 242 | 243 | //初始化rtsp推流的AVFormatContext 244 | int ret; 245 | if ((ret = avformat_alloc_output_context2(&outFormatCtx, NULL, "rtsp", desc.c_str())) < 0){ 246 | cout << "avformat_alloc_output_context2 failed, ret: " << ret << endl; 247 | return Constat::system_error; 248 | } 249 | //检查所有流是否都有数据,如果没有数据会等待最大时间,单位微秒。 250 | outFormatCtx->max_interleave_delta = 1000000; 251 | 252 | //创建一个视频流AVStream 253 | AVStream *outAvStream = avformat_new_stream(outFormatCtx, h264Codec); 254 | if (!outAvStream){ 255 | cout << "avformat_new_stream failed!" 
<< endl; 256 | return Constat::system_error; 257 | } 258 | 259 | //记录视频流的id。这里肯定是0,因为也只是创建了一个视频流 260 | outVideoindex = outFormatCtx->nb_streams - 1; 261 | 262 | //输出流的时间基 263 | outAvStream->time_base = { 1, encodeFps }; 264 | 265 | //设置流Id。 266 | outAvStream->id = outFormatCtx->nb_streams - 1; 267 | 268 | //复制编码器AVCodecContext上下文到输出流的codec字段 269 | if (avcodec_copy_context(outAvStream->codec, h264CodecCtx) < 0) { 270 | cout << "failed avcodec_copy_context" << endl; 271 | return Constat::system_error; 272 | } 273 | 274 | //这是用来解决一些编码器的错误。先传0。 275 | outAvStream->codec->codec_tag = 0; 276 | 277 | //暂时不懂 278 | if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER) { 279 | outAvStream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; 280 | } 281 | 282 | //流拷贝编码器的参数配置 283 | avcodec_parameters_from_context(outAvStream->codecpar, h264CodecCtx); 284 | 285 | av_dump_format(outFormatCtx, 0, outFormatCtx->filename, 1); 286 | 287 | //如果不是文件,则不能调用avio_open进行打开, 288 | if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE)) { 289 | //打开输出URL 290 | if (avio_open(&outFormatCtx->pb, outFormatCtx->filename, AVIO_FLAG_WRITE) < 0) { 291 | cout << "avio_open failed, file: " << desc << endl; 292 | return Constat::system_error; 293 | } 294 | } 295 | 296 | //使用tcp协议传输 这是一种参数设置方式 297 | //av_opt_set(outFormatCtx->priv_data, "rtsp_transport", "tcp", 0); 298 | 299 | //另外一种参数设置方式 300 | av_dict_set(&h264Dict, "bufsize", "10240", 0); 301 | av_dict_set(&h264Dict, "stimeout", "2000000", 0); 302 | av_dict_set(&h264Dict, "rtsp_transport","tcp",0); 303 | av_dict_set(&h264Dict, "muxdelay", "0.1", 0); 304 | av_dict_set(&h264Dict, "tune", "zerolatency", 0); 305 | outFormatCtx->audio_codec_id = outFormatCtx->oformat->audio_codec; 306 | outFormatCtx->video_codec_id = outFormatCtx->oformat->video_codec; 307 | ret = avformat_write_header(outFormatCtx, &h264Dict); 308 | if (ret < 0) { 309 | cout << "error occurred when opening output url: " << desc << endl; 310 | return ret; 311 | } 312 | 313 | cout << "init pusher 
succ !!!" << endl; 314 | return Constat::ok; 315 | } 316 | 317 | int Decoder::findVideoStreamIndex() { 318 | for (size_t i = 0; i < pFormatCtx->nb_streams; i++) { 319 | if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 320 | videoStream = i; 321 | return 0; 322 | } 323 | } 324 | return Constat::system_error; 325 | } 326 | 327 | int Decoder::decoding() { 328 | 329 | AVPacket packet; 330 | AVPacket h264Packet; 331 | av_init_packet(&packet); 332 | av_init_packet(&h264Packet); 333 | //h264Packet = av_packet_alloc(); 334 | 335 | int ret = 0; 336 | int got_frame = 0; 337 | while(1) { 338 | 339 | //读取一帧 340 | if ((ret = av_read_frame(pFormatCtx, &packet)) != 0) { 341 | if (ret == AVERROR_EOF) { 342 | cout << "av_read_frame failed, ret: " << ret << endl; 343 | } else { 344 | cout << "av_read_frame ret eof!" << endl; 345 | } 346 | break; 347 | } 348 | 349 | //判断解码包的流是视频流 350 | if (packet.stream_index != videoStream) { 351 | av_packet_unref(&packet); 352 | continue; 353 | } 354 | 355 | //1、解码帧 356 | if ((ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_frame, &packet)) < 0) { 357 | cout << "avcodec_decode_video2 ret: " << ret << endl; 358 | av_packet_unref(&packet); 359 | continue; 360 | } 361 | if(!got_frame){ 362 | cout << "avcodec_decode_video2 got_frame: " << got_frame << endl; 363 | av_packet_unref(&packet); 364 | continue; 365 | } 366 | 367 | if(skip == 0 || decodeFrameNum % skip == 0) { 368 | 369 | //2、avfilter逻辑 370 | //获取解码后的pts - 显示时间 371 | pFrame->pts = av_frame_get_best_effort_timestamp(pFrame); 372 | 373 | stringstream ss; 374 | ss << "avfilter - " << getFormatTime(); 375 | av_opt_set(filterCtx->priv, "text", ss.str().c_str(), 0); 376 | 377 | // push the decoded frame into the filtergraph 378 | if (av_buffersrc_add_frame(buffersrcCtx, pFrame) < 0) { 379 | cout << "av_buffersrc_add_frame failed!!" 
<< endl; 380 | break; 381 | } 382 | 383 | // pull filtered pictures from the filtergraph 384 | if (av_buffersink_get_frame(buffersinkCtx, h264Frame) < 0) { 385 | cout << "av_buffersink_get_frame failed!!" << endl; 386 | break; 387 | } 388 | 389 | //3、编码帧 390 | ret = avcodec_send_frame(h264CodecCtx, h264Frame); 391 | if (ret < 0){ 392 | cout << "avcodec_send_frame failed, ret:" << ret << endl; 393 | return Constat::system_error; 394 | } 395 | 396 | while (ret >= 0) { 397 | 398 | ret = avcodec_receive_packet(h264CodecCtx, &h264Packet); 399 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){ 400 | break; 401 | } 402 | 403 | if (ret < 0){ 404 | cout << "avcodec_receive_packet failed, ret:" << ret << endl; 405 | break; 406 | } 407 | 408 | h264Packet.stream_index = outVideoindex; 409 | if ((ret = av_interleaved_write_frame(outFormatCtx, &h264Packet)) < 0) { 410 | cout << "av_interleaved_write_frame failed, ret:" << ret << endl; 411 | } 412 | } 413 | 414 | av_frame_unref(pFrame); 415 | av_frame_unref(h264Frame); 416 | encodeFrameNum++; 417 | } 418 | 419 | av_packet_unref(&packet); 420 | av_packet_unref(&h264Packet); 421 | decodeFrameNum++; 422 | cout << "time: " << getFormatTime() << " encodeFrameNum: " << encodeFrameNum << " decodeFrameNum: " << decodeFrameNum << endl; 423 | } 424 | 425 | //Write file trailer 426 | av_write_trailer(outFormatCtx); 427 | return Constat::ok; 428 | } 429 | 430 | //void Decoder::saveImage(std::string filename) { 431 | // cv::Mat mat; 432 | // mat.create(cv::Size(imgWidth, imgHeight), CV_8UC3); 433 | // mat.data = buffer; 434 | // if (!cv::imwrite(filename, mat)) { 435 | // cout<< "saveImage failed, filename: " << filename << ", pts: " << pts << endl; 436 | // } else { 437 | // cout<< "saveImage success, filename: " << filename << ", pts: " << pts << endl; 438 | // } 439 | //} 440 | // 441 | //std::string Decoder::getImageName() { 442 | // time_t ts = time(NULL); 443 | // std::stringstream ss; 444 | // ss << "/tmp/"; 445 | // ss << ts; 446 | 
// ss << "_"; 447 | // ss << decodeFrameNum; 448 | // ss << "."; 449 | // ss << "jpg"; 450 | // return ss.str(); 451 | //} 452 | 453 | void Decoder::closeDecode() { 454 | 455 | avfilter_graph_free(&filterGraph); 456 | 457 | av_frame_free(&h264Frame); 458 | av_frame_free(&pFrame); 459 | 460 | avcodec_close(pCodecCtx); 461 | avcodec_free_context(&pCodecCtx); 462 | 463 | avcodec_close(pCodecCtxOrig); 464 | //avcodec_free_context(&pCodecCtxOrig); //Mustn't free 465 | 466 | //avfilter_free(filterCtx); 467 | 468 | avformat_close_input(&pFormatCtx); 469 | av_dict_free(&pAvDict); 470 | } -------------------------------------------------------------------------------- /ffmpeg-video-avstream/src/decoder.h: -------------------------------------------------------------------------------- 1 | #ifndef HELLO_DECODE_DECODE_H 2 | #define HELLO_DECODE_DECODE_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #ifdef __cplusplus 19 | } 20 | #endif 21 | 22 | #include 23 | #include 24 | 25 | class Decoder { 26 | 27 | public: 28 | void decode(std::string, std::string); 29 | 30 | private: 31 | int initDecode(std::string); 32 | int initEncode(); 33 | int initFilter(); 34 | int initPusher(std::string); 35 | int findVideoStreamIndex(); 36 | int decoding(); 37 | std::string getImageName(); 38 | void saveImage(std::string); 39 | void closeDecode(); 40 | 41 | private: 42 | 43 | int imgWidth = 0; 44 | int imgHeight = 0; 45 | int imgSize = 0; 46 | int decodeFps = 0; 47 | int encodeFps = 0; 48 | int skip = 0; 49 | int decodeFrameNum = 0; 50 | int encodeFrameNum = 0; 51 | 52 | //decode 53 | int videoStream = -1; 54 | AVDictionary *pAvDict = nullptr; 55 | AVFormatContext *pFormatCtx = nullptr; 56 | AVCodecContext *pCodecCtxOrig = nullptr; 57 | AVCodecContext *pCodecCtx = nullptr; 58 | AVFrame *pFrame = nullptr; 59 | 60 | //encode 61 | int 
h264ImgSize = 0; 62 | AVCodec *h264Codec = nullptr; 63 | AVFrame *h264Frame = nullptr; 64 | uint8_t *h264Buffer = nullptr; 65 | AVDictionary *h264Dict = nullptr; 66 | AVCodecContext *h264CodecCtx = nullptr; 67 | 68 | //avfilter 69 | AVFilterGraph *filterGraph; 70 | AVFilterContext *buffersrcCtx; 71 | AVFilterContext *buffersinkCtx; 72 | AVFilterContext *filterCtx; 73 | 74 | //pushstream 75 | int outVideoindex = -1; 76 | AVFormatContext *outFormatCtx = nullptr; 77 | 78 | //uint8_t *buffer = nullptr; 79 | 80 | //char *filtersDesc = "drawtext=fontfile=/usr/share/fonts/dejavu/DejaVuSans.ttf:fontcolor=blue:x=300:y=300:fontsize=45:text='hello avfilter'"; 81 | char *filtersDesc = "[in]drawtext=fontfile=/usr/share/fonts/dejavu/DejaVuSans.ttf:fontcolor=blue:x=300:y=300:fontsize=45:text='hello avfilter'[text];movie=test.png[wm];[text][wm]overlay=0:0[out]"; 82 | 83 | }; 84 | 85 | enum Constat { 86 | ok = 0, 87 | system_error = -1 88 | }; 89 | 90 | #endif //HELLO_DECODE_DECODE_H 91 | -------------------------------------------------------------------------------- /ffmpeg-video-avstream/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "decoder.h" 3 | 4 | using namespace std; 5 | 6 | /** 7 | * ./video_avstream http://.... rtsp://.... 8 | * 9 | * @param argc 10 | * @param argv 11 | * @return 12 | */ 13 | int main(int argc, char** argv) { 14 | 15 | if (argc <= 2) { 16 | cout << "please input valid arguments." 
// Returns the current local time formatted as "YYYY-MM-DD HH:MM:SS",
// used for timestamped log lines in the decode/encode loops.
string getFormatTime()
{
    char buf[64] = {0};
    time_t now_time = time(NULL);
    // BUGFIX: the original passed 128 as the buffer size to strftime()
    // while buf is only 64 bytes, permitting an out-of-bounds write.
    // sizeof(buf) states the real capacity.
    strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&now_time));
    return buf;
}
-------------------------------------------------------------------------------- /ffmpeg-video-decoder/README.md: -------------------------------------------------------------------------------- 1 | 该模块的能力是将视频流解码成图片。 2 | 3 | 运行时,输入解码数量和视频资源地址两个参数即可。对了,想要测试直播流地址的话。可以找下热门的直播网站,在直播间通过浏览器F12开发者工具抓取直播视频流地址即可,快捷方便。 4 | 5 | 编译和运行方式,依赖ffmpeg、opencv两个第三方库,需要自行编译和安装到third目录。 6 | 7 | cd ffmpeg-video-decoder 8 | mkdir build 9 | mkdir third (depend on ffmpeg & opencv) 10 | cd build 11 | cmake ../ 12 | make 13 | ./decoder 10 https://.... 14 | 15 | -------------------------------------------------------------------------------- /ffmpeg-video-decoder/src/decoder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "decoder.h" 4 | #include "time.h" 5 | 6 | using namespace std; 7 | 8 | void Decoder::decode(int num, std::string source) { 9 | 10 | decodeId++; 11 | decodeCount = num; 12 | 13 | int ret; 14 | if ((ret = initDecode(source)) < 0) { 15 | cout << "init decode fail :" << ret << endl; 16 | return; 17 | } 18 | 19 | if ((ret = decoding()) < 0) { 20 | cout << "decoding fail :" << ret << endl; 21 | return; 22 | } 23 | 24 | closeDecode(); 25 | } 26 | 27 | int Decoder::initDecode(std::string source) { 28 | 29 | av_register_all(); 30 | 31 | int ret; 32 | if ((ret = avformat_network_init()) != 0) { 33 | cout << "avformat_network_init failed, ret: " << ret << endl; 34 | return ret; 35 | } 36 | 37 | ret = avformat_open_input(&pFormatCtx, source.c_str(), nullptr, &pAvDict); 38 | if (ret != 0) { 39 | cout << "avformat_open_input failed, ret: " << ret << endl; 40 | return ret; 41 | } 42 | 43 | ret = avformat_find_stream_info(pFormatCtx, nullptr); 44 | if (ret < 0) { 45 | cout << "avformat_find_stream_info failed, ret: " << ret << endl; 46 | return ret; 47 | } 48 | 49 | if ((ret = findVideoStreamIndex()) < 0) { 50 | cout << "findVideoStreamIndex failed, ret: " << ret << endl; 51 | return ret; 52 | } 53 | 54 | // Get a 
pointer to the codec context for the video stream 55 | pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec; 56 | 57 | // Find the decoder for the video stream 58 | AVCodec *pCodec = nullptr; 59 | pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id); 60 | if(pCodec == NULL) { 61 | cout << "Unsupported codec!" << endl; 62 | return Constat::system_error; // Codec not found 63 | } 64 | 65 | // Copy context 66 | pCodecCtx = avcodec_alloc_context3(pCodec); 67 | if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { 68 | cout << "Couldn't copy codec context!" << ret << endl; 69 | return Constat::system_error; 70 | } 71 | 72 | // Open codec 73 | if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { 74 | cout << "Could not open codec!" << ret << endl; 75 | return Constat::system_error; 76 | } 77 | 78 | if(pCodecCtx->framerate.den > 0) { 79 | int fps = (int)(pCodecCtx->framerate.num / pCodecCtx->framerate.den); 80 | if(decodeFps == 0) { 81 | decodeFps = fps; 82 | } 83 | skip = fps / decodeFps; 84 | } 85 | 86 | // Allocate video frame 87 | pFrame=av_frame_alloc(); 88 | 89 | // Allocate an AVFrame structure 90 | pFrameRGB=av_frame_alloc(); 91 | 92 | // Determine required buffer size and allocate buffer 93 | imgWidth = pCodecCtx->width; 94 | imgHeight = pCodecCtx->height; 95 | imgSize = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 96 | buffer = (uint8_t *)av_malloc(imgSize * sizeof(uint8_t)); 97 | 98 | // Assign appropriate parts of buffer to image planes in pFrameRGB 99 | // Note that pFrameRGB is an AVFrame, but AVFrame is a superset 100 | // of AVPicture 101 | avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, imgWidth, imgHeight); 102 | 103 | // initialize SWS context for software scaling 104 | sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, 105 | pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 106 | AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr); 107 | 108 | cout << "open stream succ, 
source: " << source << ", find video stream idx: " << videoStream 109 | << ", width: " << imgWidth << ", height: " << imgHeight << ", size: " << imgSize << ", skip: " << skip 110 | << ", decodeCount: " << decodeCount << endl; 111 | return Constat::ok; 112 | } 113 | 114 | int Decoder::findVideoStreamIndex() { 115 | for (size_t i = 0; i < pFormatCtx->nb_streams; i++) { 116 | if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 117 | videoStream = i; 118 | return 0; 119 | } 120 | } 121 | return -1; 122 | } 123 | 124 | int Decoder::decoding() { 125 | 126 | AVPacket packet; 127 | av_init_packet(&packet); 128 | int got_frame = 0; 129 | 130 | while(1) { 131 | 132 | int ret = av_read_frame(pFormatCtx, &packet); 133 | if (ret != 0) { 134 | if (ret == AVERROR_EOF) { 135 | cout << "av_read_frame failed, ret: " << ret << endl; 136 | } else { 137 | cout << "av_read_frame ret eof!" << endl; 138 | } 139 | break; 140 | } 141 | 142 | if (packet.stream_index != videoStream) { 143 | av_packet_unref(&packet); 144 | continue; 145 | } 146 | 147 | if ((ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_frame, &packet)) < 0) { 148 | cout << "avcodec_decode_video2 ret: " << ret << endl; 149 | av_packet_unref(&packet); 150 | continue; 151 | } 152 | 153 | pts = packet.pts; 154 | if (got_frame > 0) { 155 | 156 | if(decodeCount == 0) { 157 | cout << "decode finish!!!" 
<< endl; 158 | break; 159 | } 160 | 161 | if(skip == 0 || decodeFrameNum % skip == 0) { 162 | decodeCount--; 163 | sws_scale(sws_ctx, (const uint8_t *const *)pFrame->data, 164 | pFrame->linesize, 0, imgHeight, pFrameRGB->data, pFrameRGB->linesize); 165 | saveImage(getImageName()); 166 | } 167 | 168 | decodeFrameNum++; 169 | } 170 | 171 | av_packet_unref(&packet); 172 | } 173 | 174 | return Constat::ok; 175 | } 176 | 177 | void Decoder::saveImage(std::string filename) { 178 | cv::Mat mat; 179 | mat.create(cv::Size(imgWidth, imgHeight), CV_8UC3); 180 | mat.data = buffer; 181 | if (!cv::imwrite(filename, mat)) { 182 | cout<< "saveImage failed, filename: " << filename << ", pts: " << pts << endl; 183 | } else { 184 | cout<< "saveImage success, filename: " << filename << ", pts: " << pts << endl; 185 | } 186 | } 187 | 188 | std::string Decoder::getImageName() { 189 | time_t ts = time(NULL); 190 | std::stringstream ss; 191 | ss << "/tmp/"; 192 | ss << decodeId; 193 | ss << "_"; 194 | ss << ts; 195 | ss << "_"; 196 | ss << decodeFrameNum; 197 | ss << "."; 198 | ss << "jpg"; 199 | return ss.str(); 200 | } 201 | 202 | void Decoder::closeDecode() { 203 | 204 | av_frame_free(&pFrameRGB); 205 | av_frame_free(&pFrame); 206 | 207 | avcodec_close(pCodecCtx); 208 | avcodec_free_context(&pCodecCtx); 209 | 210 | avcodec_close(pCodecCtxOrig); 211 | //avcodec_free_context(&pCodecCtxOrig); //Mustn't free 212 | 213 | avformat_close_input(&pFormatCtx); 214 | av_dict_free(&pAvDict); 215 | } -------------------------------------------------------------------------------- /ffmpeg-video-decoder/src/decoder.h: -------------------------------------------------------------------------------- 1 | #ifndef HELLO_DECODE_DECODE_H 2 | #define HELLO_DECODE_DECODE_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #ifdef __cplusplus 13 | } 14 | #endif 15 | 16 | #include 17 | #include 18 | 19 | class Decoder { 20 | 21 | public: 22 | 
void decode(int, std::string); 23 | 24 | private: 25 | int initDecode(std::string); 26 | int findVideoStreamIndex(); 27 | int decoding(); 28 | std::string getImageName(); 29 | void saveImage(std::string); 30 | void closeDecode(); 31 | 32 | private: 33 | 34 | int videoStream = -1; 35 | AVDictionary *pAvDict = nullptr; 36 | AVFormatContext *pFormatCtx = nullptr; 37 | AVCodecContext *pCodecCtxOrig = nullptr; 38 | AVCodecContext *pCodecCtx = nullptr; 39 | SwsContext *sws_ctx = nullptr; 40 | 41 | AVFrame *pFrame = nullptr; 42 | AVFrame *pFrameRGB = nullptr; 43 | uint8_t *buffer = nullptr; 44 | 45 | int imgWidth = 0; 46 | int imgHeight = 0; 47 | int imgSize = 0; 48 | int decodeFps = 1; 49 | int skip = 0; 50 | int pts = 0; 51 | 52 | int decodeId = 0; 53 | int decodeCount = 0; 54 | int decodeFrameNum = 0; 55 | 56 | }; 57 | 58 | enum Constat { 59 | ok = 0, 60 | system_error = -1 61 | }; 62 | 63 | #endif //HELLO_DECODE_DECODE_H 64 | -------------------------------------------------------------------------------- /ffmpeg-video-decoder/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "decoder.h" 3 | 4 | using namespace std; 5 | 6 | /** 7 | * ./decoder 10 rtsp://......... 8 | * 9 | * @param argc 10 | * @param argv 11 | * @return 12 | */ 13 | int main(int argc, char** argv) { 14 | 15 | if (argc <= 2) { 16 | cout << "please input valid arguments." 
<< endl; 17 | return 0; 18 | } 19 | 20 | Decoder d; 21 | d.decode(atoi(argv[1]), argv[2]); 22 | return 0; 23 | } 24 | -------------------------------------------------------------------------------- /ffmpeg-video-image-push/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(ffmpeg-video-image-push) 4 | 5 | set(TARGET image_push) 6 | set(CMAKE_BUILD_TYPE RELEASE) 7 | 8 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -fPIC -std=c++11") 9 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -fPIC") 10 | 11 | include_directories(third/ffmpeg/include) 12 | include_directories(third/opencv/include) 13 | link_directories(${CMAKE_SOURCE_DIR}/third/ffmpeg/lib) 14 | link_directories(${CMAKE_SOURCE_DIR}/third/opencv/lib) 15 | 16 | set(OPENCV_LIBS opencv_imgproc opencv_highgui opencv_contrib opencv_core) 17 | set(FFMPEG_LIBS avformat avcodec avutil swresample swscale vpx x264 x265 fdk-aac mp3lame opus) 18 | set(SYSTEM_LIBS ssl crypto pthread z bz2 m dl) 19 | 20 | file(GLOB_RECURSE sources src/*.[ch]pp) 21 | list(REMOVE_ITEM sources ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 22 | 23 | #编译成可执行文件 24 | add_executable(${TARGET} ${sources} ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 25 | 26 | #添加链接库 27 | target_link_libraries(${TARGET} PUBLIC ${OPENCV_LIBS} ${FFMPEG_LIBS} ${SYSTEM_LIBS}) 28 | -------------------------------------------------------------------------------- /ffmpeg-video-image-push/README.md: -------------------------------------------------------------------------------- 1 | 该模块的能力是将视频流解码成图片。 2 | 3 | 运行时,输入解码数量和视频资源地址两个参数即可。对了,想要测试直播流地址的话。可以找下热门的直播网站,在直播间通过浏览器F12开发者工具抓取直播视频流地址即可,快捷方便。 4 | 5 | 编译和运行方式,依赖ffmpeg、opencv两个第三方库,需要自行编译和安装到third目录。 6 | 7 | cd ffmpeg-video-image-push 8 | mkdir build 9 | mkdir third (depend on ffmpeg & opencv) 10 | cd build 11 | cmake ../ 12 | make 13 | ./image_push rtsp://.... 
14 | 15 | -------------------------------------------------------------------------------- /ffmpeg-video-image-push/src/decoder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "decoder.h" 4 | #include "time.h" 5 | #include 6 | 7 | using namespace std; 8 | 9 | void Decoder::decode(std::string desc) { 10 | 11 | int ret; 12 | if ((ret = initDecode()) < 0) { 13 | cout << "init decode fail :" << ret << endl; 14 | return; 15 | } 16 | 17 | if ((ret = initEncode()) < 0) { 18 | cout << "init encode fail :" << ret << endl; 19 | return; 20 | } 21 | 22 | if ((ret = initPusher(desc)) < 0) { 23 | cout << "init pusher fail :" << ret << endl; 24 | return; 25 | } 26 | 27 | if ((ret = decoding()) < 0) { 28 | cout << "decoding fail :" << ret << endl; 29 | return; 30 | } 31 | 32 | closeDecode(); 33 | } 34 | 35 | int Decoder::initDecode() { 36 | 37 | av_register_all(); 38 | 39 | int ret; 40 | if ((ret = avformat_network_init()) != 0) { 41 | cout << "avformat_network_init failed, ret: " << ret << endl; 42 | return ret; 43 | } 44 | 45 | const AVCodec *pCodec; 46 | pCodec = avcodec_find_decoder(AV_CODEC_ID_MJPEG); 47 | parserJpg = av_parser_init(pCodec->id); 48 | pCodecCtx = avcodec_alloc_context3(pCodec); 49 | if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { 50 | cout << "decode avcodec_open2 failed." << endl; 51 | return Constat::system_error; 52 | } 53 | 54 | //解码的包 55 | pPacket = av_packet_alloc(); 56 | 57 | //解码的帧 58 | pFrame = av_frame_alloc(); 59 | 60 | cout << "init decode succ !!!" 
// Creates and opens the H.264 encoder that turns decoded JPEG frames into
// an H.264 elementary stream for pushing. Returns Constat::ok on success,
// Constat::system_error on any failure.
int Decoder::initEncode() {

    // Look up the H.264 encoder.
    h264Codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!h264Codec){
        cout << "avcodec_find_encoder failed" << endl;
        return Constat::system_error;
    }

    // Allocate the codec context; all encoder configuration lives here.
    h264CodecCtx = avcodec_alloc_context3(h264Codec);
    if (!h264CodecCtx){
        cout << "avcodec_find_encoder failed" << endl;
        return Constat::system_error;
    }

    //h264CodecCtx->bit_rate = 400000; // fixed bitrate; left disabled (original note: had no visible effect)
    h264CodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;      // raw frame format fed to the encoder
    h264CodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;   // video encoding
    h264CodecCtx->width = 1920;                      // output width  — NOTE(review): hard-coded; assumes 1920x1080 input, confirm against sources
    h264CodecCtx->height = 1080;                     // output height
    h264CodecCtx->channels = 0;                      // audio channel count (unused for video)
    h264CodecCtx->time_base = {1, encodeFps};        // time base: seconds per tick
    h264CodecCtx->framerate = {encodeFps, 1};        // frame rate, frames per second
    h264CodecCtx->gop_size = 2 * encodeFps;          // distance between two key (I) frames, i.e. frames per GOP (was 10)
    h264CodecCtx->max_b_frames = 5;                  // B-frame count: more B frames = better compression but higher latency
    h264CodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; // original note: effect unclear — presumably needed so the muxer gets global extradata; verify

    //av_opt_set(h264CodecCtx->priv_data, "preset", "slow", 0); // slow preset: better quality
    //av_opt_set(h264CodecCtx->priv_data, "preset", "veryfast", 0);
    av_opt_set(h264CodecCtx->priv_data, "preset", "ultrafast", 0); // fastest encoding, at some quality cost
    //av_opt_set(h264CodecCtx->priv_data, "tune", "zerolatency", 0); // low-latency streaming; produced green frames here

    // Open the encoder.
    if (avcodec_open2(h264CodecCtx, h264Codec, NULL) < 0){
        cout << "encode avcodec_open2 failed!" << endl;
        return Constat::system_error;
    }

    h264Packet = av_packet_alloc();

    cout << "init encode succ !!!" << endl;
    return Constat::ok;
}
//另外一种参数设置方式 168 | av_dict_set(&h264Dict, "bufsize", "10240", 0); 169 | av_dict_set(&h264Dict, "stimeout", "6000000", 0); 170 | av_dict_set(&h264Dict, "rtsp_transport","tcp",0); 171 | av_dict_set(&h264Dict, "muxdelay", "0.1", 0); 172 | av_dict_set(&h264Dict, "tune", "zerolatency", 0); 173 | outFormatCtx->audio_codec_id = outFormatCtx->oformat->audio_codec; 174 | outFormatCtx->video_codec_id = outFormatCtx->oformat->video_codec; 175 | ret = avformat_write_header(outFormatCtx, &h264Dict); 176 | if (ret < 0) { 177 | cout << "error occurred when opening output url: " << desc << endl; 178 | return ret; 179 | } 180 | 181 | cout << "init pusher succ !!!" << endl; 182 | return Constat::ok; 183 | } 184 | 185 | int Decoder::decoding() { 186 | 187 | //start_time=av_gettime(); 188 | for (int i = 1; i <= 10000; i++) { 189 | 190 | uint64_t startTime = getCurTimestamp(); 191 | std::string imageData; 192 | string imageName = getImageName(i); 193 | 194 | if (!isExistFile(imageName)) { 195 | cout << "==========finish push===========" << endl; 196 | return 0; 197 | } 198 | 199 | long filesize = getImageSize(imageName); 200 | if (filesize > imageData.size()) { 201 | imageData.resize(filesize); 202 | } 203 | FILE* fileImage = fopen(imageName.c_str(), "rb"); 204 | fread(&(imageData[0]), 1, filesize, fileImage); 205 | fclose(fileImage); 206 | 207 | //读取的jpeg数据。属于没有封装格式的裸流,区别就是不包含PTS、DTS这些参数的。 208 | uint8_t *in_data = (uint8_t *)(imageData.data()); 209 | size_t in_len = filesize; 210 | while (in_len > 0) { 211 | 212 | //通过av_parser_parse2拿到AVPaket数据 213 | int len = av_parser_parse2(parserJpg, pCodecCtx, &pPacket->data, &pPacket->size, in_data, in_len, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0); 214 | if (len < 0) { 215 | cout << "av_parser_parse2 failed!" 
<< endl; 216 | return Constat::system_error; 217 | } 218 | in_data += len; 219 | in_len -= len; 220 | 221 | if (pPacket->size) { 222 | cout << "parserJpg file: " << imageName << " i: " << i << " len: " << pPacket->size << endl; 223 | decodeJpg(i); 224 | } 225 | } 226 | 227 | //Important:Delay 228 | //av_usleep(pts_time - now_time); 229 | 230 | //TODO 推流其实需要pts、duration;也需要用av_usleep进行暂停 231 | uint64_t costTime = getCurTimestamp() - startTime; 232 | if ( frame_step > costTime ) { 233 | sleep_ms(frame_step - costTime); 234 | } 235 | 236 | } 237 | 238 | return Constat::ok; 239 | } 240 | 241 | int Decoder::decodeJpg(int64_t pts) { 242 | 243 | int ret = avcodec_send_packet(pCodecCtx, pPacket); //将原始数据包传给ffmpeg解码器 244 | if (ret < 0) { 245 | cout << "decodeJpg avcodec_send_packet failed, ret:" << ret << endl; 246 | return Constat::system_error; 247 | } 248 | 249 | while (ret >= 0) { 250 | 251 | ret = avcodec_receive_frame(pCodecCtx, pFrame); //从解码队列中取出1个frame 252 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { 253 | break; 254 | } 255 | 256 | if (ret < 0) { 257 | cout << "decodeJpg avcodec_receive_frame failed, ret:" << ret << endl; 258 | return Constat::system_error; 259 | } 260 | 261 | cout << "avcodec_receive_frame frame " << pCodecCtx->frame_number << endl; 262 | 263 | encodeYuvToH264(pts); 264 | } 265 | 266 | } 267 | 268 | int Decoder::encodeYuvToH264(int64_t pts) { 269 | 270 | pFrame->pts = pts; 271 | int ret = avcodec_send_frame(h264CodecCtx, pFrame); //将帧数据包传给ffmpeg编码器 272 | if (ret < 0){ 273 | cout << "encodeYuvToH264 avcodec_send_frame failed, ret:" << ret << endl; 274 | return Constat::system_error; 275 | } 276 | 277 | while (ret >= 0) { 278 | 279 | ret = avcodec_receive_packet(h264CodecCtx, h264Packet); //从编码队列中取出1个packagt 280 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){ 281 | break; 282 | } 283 | 284 | if (ret < 0){ 285 | cout << "encodeYuvToH264 avcodec_receive_packet failed, ret:" << ret << endl; 286 | break; 287 | } 288 | 289 | 
//h264Packet->stream_index = outVideoindex; 290 | if ((ret = av_interleaved_write_frame(outFormatCtx, h264Packet)) < 0) { 291 | cout << "encodeYuvToH264 av_interleaved_write_frame failed, ret:" << ret << endl; 292 | } 293 | } 294 | 295 | av_frame_unref(pFrame); 296 | } 297 | 298 | 299 | std::string Decoder::getImageName(int num) { 300 | std::stringstream ss; 301 | ss << "/root/ffmpeg-cpp-video-processes/image/"; 302 | ss << num; 303 | ss << ".jpg"; 304 | return ss.str(); 305 | } 306 | 307 | long Decoder::getImageSize(string filename) { 308 | FILE* f = fopen(filename.c_str(), "rb"); 309 | fseek(f,0,SEEK_END); 310 | long s = ftell(f); 311 | fclose(f); 312 | return s; 313 | } 314 | 315 | bool Decoder::isExistFile(string filename) { 316 | return access(filename.c_str(), 0) == 0; 317 | } 318 | 319 | uint64_t Decoder::getCurTimestamp(){ 320 | struct timeval cur; 321 | gettimeofday(&cur, NULL); 322 | return cur.tv_sec * 1000 + cur.tv_usec / 1000; 323 | } 324 | 325 | 326 | void Decoder::closeDecode() { 327 | avcodec_free_context(&h264CodecCtx); 328 | av_frame_free(&pFrame); 329 | av_packet_free(&h264Packet); 330 | av_packet_free(&pPacket); 331 | av_parser_close(parserJpg); 332 | avcodec_free_context(&pCodecCtx); 333 | } -------------------------------------------------------------------------------- /ffmpeg-video-image-push/src/decoder.h: -------------------------------------------------------------------------------- 1 | #ifndef HELLO_DECODE_DECODE_H 2 | #define HELLO_DECODE_DECODE_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #ifdef __cplusplus 19 | } 20 | #endif 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | class Decoder { 28 | 29 | public: 30 | void decode(std::string); 31 | 32 | private: 33 | int initDecode(); 34 | int initEncode(); 35 | int initPusher(std::string); 36 | int decoding(); 
// Sleeps for `ms` milliseconds using select() as a portable sub-second
// timer. BUGFIX: the parameter was misleadingly named `secs` although every
// caller passes milliseconds, and `secs * 1000` could overflow unsigned int
// for large values; `(ms % 1000) * 1000` is mathematically equivalent for
// tv_usec and overflow-free.
static void sleep_ms(unsigned int ms){

    struct timeval tval;
    tval.tv_sec = ms / 1000;
    tval.tv_usec = (ms % 1000) * 1000;
    select(0, NULL, NULL, NULL, &tval);
}
<< endl; 17 | return 0; 18 | } 19 | 20 | Decoder e; 21 | e.decode(argv[1]); 22 | return 0; 23 | } 24 | -------------------------------------------------------------------------------- /ffmpeg-video-overlay/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(ffmpeg-video-overlay) 4 | 5 | set(TARGET overlay_decoder) 6 | set(CMAKE_BUILD_TYPE RELEASE) 7 | 8 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -fPIC -std=c++11") 9 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -fPIC") 10 | 11 | include_directories(third/ffmpeg/include) 12 | include_directories(third/opencv/include) 13 | link_directories(${CMAKE_SOURCE_DIR}/third/ffmpeg/lib) 14 | link_directories(${CMAKE_SOURCE_DIR}/third/opencv/lib) 15 | 16 | set(OPENCV_LIBS opencv_imgproc opencv_highgui opencv_contrib opencv_core) 17 | set(FFMPEG_LIBS avformat avfilter avcodec avutil postproc swresample swscale vpx x264 x265 fdk-aac mp3lame opus) 18 | set(SYSTEM_LIBS ssl crypto pthread z bz2 m dl freetype) 19 | 20 | file(GLOB_RECURSE sources src/*.[ch]pp) 21 | list(REMOVE_ITEM sources ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 22 | 23 | #编译成可执行文件 24 | add_executable(${TARGET} ${sources} ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 25 | 26 | #添加链接库 27 | target_link_libraries(${TARGET} PUBLIC ${OPENCV_LIBS} ${FFMPEG_LIBS} ${SYSTEM_LIBS}) 28 | -------------------------------------------------------------------------------- /ffmpeg-video-overlay/README.md: -------------------------------------------------------------------------------- 1 | 该模块的能力是将视频流解码成图片的基础上加上了文字水印和图片水印的能力。 2 | 3 | 运行时,输入解码数量和视频资源地址两个参数即可。对了,想要测试直播流地址的话。可以找下热门的直播网站,在直播间通过浏览器F12开发者工具抓取直播视频流地址即可,快捷方便。 4 | 5 | 编译和运行方式,依赖ffmpeg、opencv两个第三方库,需要自行编译和安装到third目录。 6 | 7 | cd ffmpeg-video-overlay 8 | mkdir build 9 | mkdir third (depend on ffmpeg & opencv) 10 | cd build 11 | cmake ../ 12 | make 13 | ./overlay_decoder 10 http://.... 
14 | 15 | -------------------------------------------------------------------------------- /ffmpeg-video-overlay/src/decoder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "decoder.h" 4 | #include "time_util.h" 5 | 6 | using namespace std; 7 | 8 | void Decoder::decode(int num, std::string source) { 9 | 10 | decodeId++; 11 | decodeCount = num; 12 | 13 | int ret; 14 | if ((ret = initDecode(source)) < 0) { 15 | cout << "init decode fail :" << ret << endl; 16 | return; 17 | } 18 | 19 | if ((ret = initFilter()) < 0) { 20 | cout << "init filter fail :" << ret << endl; 21 | return; 22 | } 23 | 24 | if ((ret = decoding()) < 0) { 25 | cout << "decoding fail :" << ret << endl; 26 | return; 27 | } 28 | 29 | closeDecode(); 30 | } 31 | 32 | int Decoder::initDecode(std::string source) { 33 | 34 | av_register_all(); 35 | 36 | int ret; 37 | if ((ret = avformat_network_init()) != 0) { 38 | cout << "avformat_network_init failed, ret: " << ret << endl; 39 | return ret; 40 | } 41 | 42 | ret = avformat_open_input(&pFormatCtx, source.c_str(), nullptr, &pAvDict); 43 | if (ret != 0) { 44 | cout << "avformat_open_input failed, ret: " << ret << endl; 45 | return ret; 46 | } 47 | 48 | ret = avformat_find_stream_info(pFormatCtx, nullptr); 49 | if (ret < 0) { 50 | cout << "avformat_find_stream_info failed, ret: " << ret << endl; 51 | return ret; 52 | } 53 | 54 | if ((ret = findVideoStreamIndex()) < 0) { 55 | cout << "findVideoStreamIndex failed, ret: " << ret << endl; 56 | return ret; 57 | } 58 | 59 | // Get a pointer to the codec context for the video stream 60 | pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec; 61 | 62 | // Find the decoder for the video stream 63 | AVCodec *pCodec = nullptr; 64 | pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id); 65 | if(pCodec == NULL) { 66 | cout << "Unsupported codec!" 
<< endl; 67 | return Constat::system_error; // Codec not found 68 | } 69 | 70 | // Copy context 71 | pCodecCtx = avcodec_alloc_context3(pCodec); 72 | if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { 73 | cout << "Couldn't copy codec context!" << ret << endl; 74 | return Constat::system_error; 75 | } 76 | 77 | // Open codec 78 | if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { 79 | cout << "Could not open codec!" << ret << endl; 80 | return Constat::system_error; 81 | } 82 | 83 | if(pCodecCtx->framerate.den > 0) { 84 | int fps = (int)(pCodecCtx->framerate.num / pCodecCtx->framerate.den); 85 | if(decodeFps == 0) { 86 | decodeFps = fps; 87 | } 88 | skip = fps / decodeFps; 89 | } 90 | 91 | // Allocate video frame 92 | pFrame=av_frame_alloc(); 93 | 94 | // Allocate an AVFrame structure 95 | pFrameRGB=av_frame_alloc(); 96 | 97 | // Allocate an AVFrame structure 98 | pFrameOut=av_frame_alloc(); 99 | 100 | // Determine required buffer size and allocate buffer 101 | imgWidth = pCodecCtx->width; 102 | imgHeight = pCodecCtx->height; 103 | imgSize = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 104 | buffer = (uint8_t *)av_malloc(imgSize * sizeof(uint8_t)); 105 | 106 | // Assign appropriate parts of buffer to image planes in pFrameRGB 107 | // Note that pFrameRGB is an AVFrame, but AVFrame is a superset 108 | // of AVPicture 109 | avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, imgWidth, imgHeight); 110 | 111 | // initialize SWS context for software scaling 112 | sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, 113 | pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 114 | AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr); 115 | 116 | cout << "open stream succ, source: " << source << ", find video stream idx: " << videoStream 117 | << ", width: " << imgWidth << ", height: " << imgHeight << ", size: " << imgSize << ", skip: " << skip 118 | << ", decodeCount: " << decodeCount << endl; 119 | 
return Constat::ok; 120 | } 121 | 122 | int Decoder::initFilter() { 123 | 124 | avfilter_register_all(); 125 | 126 | char args[512]; 127 | AVFilter *buffersrc = avfilter_get_by_name("buffer"); 128 | AVFilter *buffersink = avfilter_get_by_name("buffersink"); 129 | AVFilterInOut *outputs = avfilter_inout_alloc(); 130 | AVFilterInOut *inputs = avfilter_inout_alloc(); 131 | enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; 132 | 133 | /* buffer video source: the decoded frames from the decoder will be inserted here. */ 134 | filterGraph = avfilter_graph_alloc(); 135 | snprintf(args, sizeof(args), 136 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 137 | pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 138 | pCodecCtx->time_base.num, pCodecCtx->time_base.den, 139 | pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den); 140 | 141 | int ret = avfilter_graph_create_filter(&buffersrcCtx, buffersrc, "in", args, NULL, filterGraph); 142 | if (ret < 0) { 143 | cout << "Cannot create buffer source!" << endl; 144 | return ret; 145 | } 146 | 147 | /* buffer video sink: to terminate the filter chain. */ 148 | AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); 149 | buffersink_params->pixel_fmts = pix_fmts; 150 | ret = avfilter_graph_create_filter(&buffersinkCtx, buffersink, "out", NULL, buffersink_params, filterGraph); 151 | av_free(buffersink_params); 152 | if (ret < 0) { 153 | cout << "Cannot create buffer sink!" << endl; 154 | return ret; 155 | } 156 | 157 | /* Endpoints for the filter graph. 
*/ 158 | outputs->name = av_strdup("in"); 159 | outputs->filter_ctx = buffersrcCtx; 160 | outputs->pad_idx = 0; 161 | outputs->next = NULL; 162 | 163 | inputs->name = av_strdup("out"); 164 | inputs->filter_ctx = buffersinkCtx; 165 | inputs->pad_idx = 0; 166 | inputs->next = NULL; 167 | 168 | if ((ret = avfilter_graph_parse_ptr(filterGraph, filtersDesc, &inputs, &outputs, NULL)) < 0){ 169 | cout << "avfilter_graph_parse_ptr failed, ret: " << ret << endl; 170 | return ret; 171 | } 172 | 173 | if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0) { 174 | cout << "avfilter_graph_config failed, ret: " << ret << endl; 175 | return ret; 176 | } 177 | 178 | filterCtx = filterGraph->filters[2]; 179 | return Constat::ok; 180 | } 181 | 182 | int Decoder::findVideoStreamIndex() { 183 | for (size_t i = 0; i < pFormatCtx->nb_streams; i++) { 184 | if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 185 | videoStream = i; 186 | return 0; 187 | } 188 | } 189 | return Constat::system_error; 190 | } 191 | 192 | 193 | int Decoder::decoding() { 194 | 195 | AVPacket packet; 196 | av_init_packet(&packet); 197 | int got_frame = 0; 198 | 199 | while(1) { 200 | 201 | int ret = av_read_frame(pFormatCtx, &packet); 202 | if (ret != 0) { 203 | if (ret == AVERROR_EOF) { 204 | cout << "av_read_frame ret eof, ret: " << ret << endl; 205 | } else { 206 | cout << "av_read_frame failed, ret: " << ret << endl; 207 | } 208 | break; 209 | } 210 | 211 | if (packet.stream_index != videoStream) { 212 | av_packet_unref(&packet); 213 | continue; 214 | } 215 | 216 | if ((ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_frame, &packet)) < 0) { 217 | cout << "avcodec_decode_video2 ret: " << ret << endl; 218 | av_packet_unref(&packet); 219 | continue; 220 | } 221 | 222 | pts = packet.pts; 223 | if (got_frame > 0) { 224 | 225 | if(decodeCount == 0) { 226 | cout << "decode finish!!!"
<< endl; 227 | break; 228 | } 229 | 230 | if(skip == 0 || decodeFrameNum % skip == 0) { 231 | 232 | //avfilter 233 | pFrame->pts = av_frame_get_best_effort_timestamp(pFrame); 234 | 235 | //av_opt_set 236 | stringstream ss; 237 | ss << "avfilter - " << getFormatTime(); 238 | av_opt_set(filterCtx->priv, "text", ss.str().c_str(), 0); 239 | 240 | /* push the decoded frame into the filtergraph */ 241 | if (av_buffersrc_add_frame(buffersrcCtx, pFrame) < 0) { 242 | cout << "Error while feeding the filtergraph!" << endl; 243 | break; 244 | } 245 | 246 | /* pull filtered pictures from the filtergraph */ 247 | if (av_buffersink_get_frame(buffersinkCtx, pFrameOut) < 0) { 248 | cout << "Error get filter frame the filtergraph!" << endl; 249 | break; 250 | } 251 | 252 | decodeCount--; 253 | sws_scale(sws_ctx, (const uint8_t *const *)pFrameOut->data, 254 | pFrameOut->linesize, 0, imgHeight, pFrameRGB->data, pFrameRGB->linesize); 255 | saveImage(getImageName()); 256 | 257 | av_frame_unref(pFrame); 258 | av_frame_unref(pFrameOut); 259 | } 260 | 261 | decodeFrameNum++; 262 | cout << "time: " << getFormatTime() << " decodeFrameNum: " << decodeFrameNum << endl; 263 | } 264 | 265 | av_packet_unref(&packet); 266 | } 267 | return Constat::ok; 268 | } 269 | 270 | void Decoder::saveImage(std::string filename) { 271 | cv::Mat mat; 272 | mat.create(cv::Size(imgWidth, imgHeight), CV_8UC3); 273 | mat.data = buffer; 274 | if (!cv::imwrite(filename, mat)) { 275 | cout<< "saveImage failed, filename: " << filename << ", pts: " << pts << endl; 276 | } else { 277 | cout<< "saveImage success, filename: " << filename << ", pts: " << pts << endl; 278 | } 279 | } 280 | 281 | std::string Decoder::getImageName() { 282 | time_t ts = time(NULL); 283 | std::stringstream ss; 284 | ss << "/tmp/"; 285 | ss << decodeId; 286 | ss << "_"; 287 | ss << ts; 288 | ss << "_"; 289 | ss << decodeFrameNum; 290 | ss << "."; 291 | ss << "jpg"; 292 | return ss.str(); 293 | } 294 | 295 | void Decoder::closeDecode() { 296 | 
297 | avfilter_graph_free(&filterGraph); 298 | 299 | av_frame_free(&pFrameOut); 300 | av_frame_free(&pFrameRGB); 301 | av_frame_free(&pFrame); 302 | 303 | avcodec_close(pCodecCtx); 304 | avcodec_free_context(&pCodecCtx); 305 | 306 | avcodec_close(pCodecCtxOrig); 307 | //avcodec_free_context(&pCodecCtxOrig); //Mustn't free 308 | 309 | //avfilter_free(filterCtx); 310 | 311 | avformat_close_input(&pFormatCtx); 312 | av_dict_free(&pAvDict); 313 | } -------------------------------------------------------------------------------- /ffmpeg-video-overlay/src/decoder.h: -------------------------------------------------------------------------------- 1 | #ifndef HELLO_DECODE_DECODE_H 2 | #define HELLO_DECODE_DECODE_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #ifdef __cplusplus 18 | } 19 | #endif 20 | 21 | #include 22 | #include 23 | 24 | class Decoder { 25 | 26 | public: 27 | void decode(int, std::string); 28 | 29 | private: 30 | int initDecode(std::string); 31 | int initFilter(); 32 | int findVideoStreamIndex(); 33 | int decoding(); 34 | std::string getImageName(); 35 | void saveImage(std::string); 36 | void closeDecode(); 37 | 38 | private: 39 | 40 | int videoStream = -1; 41 | AVDictionary *pAvDict = nullptr; 42 | AVFormatContext *pFormatCtx = nullptr; 43 | AVCodecContext *pCodecCtxOrig = nullptr; 44 | AVCodecContext *pCodecCtx = nullptr; 45 | SwsContext *sws_ctx = nullptr; 46 | 47 | AVFrame *pFrame = nullptr; 48 | AVFrame *pFrameRGB = nullptr; 49 | AVFrame *pFrameOut = nullptr; 50 | uint8_t *buffer = nullptr; 51 | 52 | int imgWidth = 0; 53 | int imgHeight = 0; 54 | int imgSize = 0; 55 | int decodeFps = 1; 56 | int skip = 0; 57 | int pts = 0; 58 | 59 | int decodeId = 0; 60 | int decodeCount = 0; 61 | int decodeFrameNum = 0; 62 | 63 | AVFilterGraph *filterGraph; 64 | AVFilterContext *buffersrcCtx; 65 | AVFilterContext 
*buffersinkCtx; 66 | AVFilterContext *filterCtx; 67 | 68 | //char *filtersDesc = "drawtext=fontfile=/usr/share/fonts/dejavu/DejaVuSans.ttf:fontcolor=blue:x=300:y=300:fontsize=45:text='hello avfilter'"; 69 | //char *filtersDesc = "movie=test.png [watermark]; [in][watermark] overlay=0:0 [out]"; 70 | char *filtersDesc = "[in]drawtext=fontfile=/usr/share/fonts/dejavu/DejaVuSans.ttf:fontcolor=blue:x=300:y=300:fontsize=45:text='hello avfilter'[text];movie=test.png[wm];[text][wm]overlay=0:0[out]"; 71 | 72 | }; 73 | 74 | enum Constat { 75 | ok = 0, 76 | system_error = -1 77 | }; 78 | 79 | #endif //HELLO_DECODE_DECODE_H 80 | -------------------------------------------------------------------------------- /ffmpeg-video-overlay/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "decoder.h" 3 | 4 | #include 5 | #include 6 | 7 | using namespace cv; 8 | using namespace std; 9 | 10 | /** 11 | * ./overlay_decoder 10 https://......... 12 | * 13 | * @param argc 14 | * @param argv 15 | * @return 16 | */ 17 | int main(int argc, char** argv) { 18 | 19 | if (argc <= 2) { 20 | cout << "please input valid arguments." 
<< endl; 21 | return 0; 22 | } 23 | 24 | Decoder d; 25 | d.decode(atoi(argv[1]), argv[2]); 26 | 27 | return 0; 28 | } 29 | -------------------------------------------------------------------------------- /ffmpeg-video-overlay/src/time_util.h: -------------------------------------------------------------------------------- 1 | #ifndef FFMPEG_VIDEO_AVFILTER_TIME_UTIL_H 2 | #define FFMPEG_VIDEO_AVFILTER_TIME_UTIL_H 3 | 4 | #include 5 | #include 6 | using namespace std; 7 | 8 | string getFormatTime() 9 | { 10 | time_t now_time; 11 | char buf[64] = {0}; 12 | now_time=time(NULL); 13 | strftime(buf, sizeof(buf),"%Y-%m-%d %H:%M:%S", localtime(&now_time)); 14 | return buf; 15 | } 16 | 17 | #endif -------------------------------------------------------------------------------- /ffmpeg-video-overlay/src/trans_image.h: -------------------------------------------------------------------------------- 1 | #ifndef FFMPEG_VIDEO_OVERLAY_TRANS_UTIL_H 2 | #define FFMPEG_VIDEO_OVERLAY_TRANS_UTIL_H 3 | 4 | void createImage() { 5 | 6 | cv::Mat mat(720, 1280, CV_8UC3, CV_RGB(255, 255, 255)); 7 | IplImage *image = new IplImage(mat); 8 | 9 | int arr[1]= {5}; 10 | CvPoint ** pt = new CvPoint*[1]; 11 | pt[0] = new CvPoint[5]; 12 | pt[0][0] = cvPoint(100, 200); 13 | pt[0][1] = cvPoint(400, 80); 14 | pt[0][2] = cvPoint(800, 250); 15 | pt[0][3] = cvPoint(900, 600); 16 | pt[0][4] = cvPoint(200, 600); 17 | cvPolyLine(image, pt, arr, 1, 1, CV_RGB(255, 0, 0), 2); 18 | 19 | IplImage *dstImage = 0; 20 | CvSize dst_cvsize; 21 | dst_cvsize.width = image->width; 22 | dst_cvsize.height = image->height; 23 | dstImage = cvCreateImage( dst_cvsize, image->depth, 4); 24 | 25 | int x; 26 | int y; 27 | uchar r, g, b; 28 | for (y = 0; y < image->height; y++) { 29 | uchar *ptrSrc = (uchar*)(image->imageData + y * image->widthStep); 30 | uchar *ptrDst = (uchar*)(dstImage->imageData + y * dstImage->widthStep); 31 | for (x = 0; x < image->width; x++) { 32 | r = ptrSrc[3 * x]; 33 | g = ptrSrc[3 * x + 1]; 34 | b = ptrSrc[3 * x
+ 2]; 35 | ptrDst[4 * x] = r; 36 | ptrDst[4 * x + 1] = g; 37 | ptrDst[4 * x + 2] = b; 38 | if (255 == r && 255 == g && 255 == b) { 39 | ptrDst[4 * x + 3] = 0; 40 | } 41 | else { 42 | ptrDst[4 * x + 3] = 255; 43 | } 44 | } 45 | } 46 | cvSaveImage("test.png", dstImage); 47 | } 48 | 49 | #endif //FFMPEG_VIDEO_OVERLAY_TRANS_UTIL_H 50 | -------------------------------------------------------------------------------- /ffmpeg-video-transcoder/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | project(ffmpeg-video-transcoder) 4 | 5 | set(TARGET trans_decoder) 6 | set(CMAKE_BUILD_TYPE RELEASE) 7 | 8 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -fPIC -std=c++11") 9 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -fPIC") 10 | 11 | include_directories(third/ffmpeg/include) 12 | include_directories(third/opencv/include) 13 | link_directories(${CMAKE_SOURCE_DIR}/third/ffmpeg/lib) 14 | link_directories(${CMAKE_SOURCE_DIR}/third/opencv/lib) 15 | 16 | set(OPENCV_LIBS opencv_imgproc opencv_highgui opencv_contrib opencv_core) 17 | set(FFMPEG_LIBS avformat avfilter avcodec avutil postproc swresample swscale vpx x264 x265 fdk-aac mp3lame opus) 18 | set(SYSTEM_LIBS ssl crypto pthread z bz2 m dl freetype) 19 | 20 | file(GLOB_RECURSE sources src/*.[ch]pp) 21 | list(REMOVE_ITEM sources ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 22 | 23 | #编译成可执行文件 24 | add_executable(${TARGET} ${sources} ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cpp) 25 | 26 | #添加链接库 27 | target_link_libraries(${TARGET} PUBLIC ${OPENCV_LIBS} ${FFMPEG_LIBS} ${SYSTEM_LIBS}) 28 | -------------------------------------------------------------------------------- /ffmpeg-video-transcoder/README.md: -------------------------------------------------------------------------------- 1 | 该模块的能力是将视频流转码、推流。 2 | 3 | 运行时,输入解码数量和视频资源地址两个参数即可。对了,想要测试直播流地址的话。可以找下热门的直播网站,在直播间通过浏览器F12开发者工具抓取直播视频流地址即可,快捷方便。 4 | 5 | 
编译和运行方式,依赖ffmpeg、opencv两个第三方库,需要自行编译和安装到third目录。 6 | 7 | cd ffmpeg-video-transcoder 8 | mkdir build 9 | mkdir third (depend on ffmpeg & opencv) 10 | cd build 11 | cmake ../ 12 | make 13 | ./pusher_decoder http://.... rtsp://.... 14 | 15 | -------------------------------------------------------------------------------- /ffmpeg-video-transcoder/src/decoder.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "decoder.h" 4 | #include "time_util.h" 5 | 6 | using namespace std; 7 | 8 | void Decoder::decode(std::string source, std::string desc) { 9 | 10 | decodeId++; 11 | decodeCount = 0; 12 | 13 | int ret; 14 | if ((ret = initDecode(source)) < 0) { 15 | cout << "init decode fail :" << ret << endl; 16 | return; 17 | } 18 | 19 | if ((ret = initEncode(desc)) < 0) { 20 | cout << "init encode fail :" << ret << endl; 21 | return; 22 | } 23 | 24 | // if ((ret = initFilter()) < 0) { 25 | // cout << "init filter fail :" << ret << endl; 26 | // return; 27 | // } 28 | 29 | if ((ret = decoding()) < 0) { 30 | cout << "decoding fail :" << ret << endl; 31 | return; 32 | } 33 | 34 | closeDecode(); 35 | } 36 | 37 | int Decoder::initDecode(std::string source) { 38 | 39 | av_register_all(); 40 | 41 | int ret; 42 | if ((ret = avformat_network_init()) != 0) { 43 | cout << "avformat_network_init failed, ret: " << ret << endl; 44 | return ret; 45 | } 46 | 47 | ret = avformat_open_input(&pFormatCtx, source.c_str(), nullptr, &pAvDict); 48 | if (ret != 0) { 49 | cout << "avformat_open_input failed, ret: " << ret << endl; 50 | return ret; 51 | } 52 | 53 | ret = avformat_find_stream_info(pFormatCtx, nullptr); 54 | if (ret < 0) { 55 | cout << "avformat_find_stream_info failed, ret: " << ret << endl; 56 | return ret; 57 | } 58 | 59 | if ((ret = findVideoStreamIndex()) < 0) { 60 | cout << "findVideoStreamIndex failed, ret: " << ret << endl; 61 | return ret; 62 | } 63 | 64 | // Get a pointer to the codec context for the video 
stream 65 | pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec; 66 | 67 | // Find the decoder for the video stream 68 | AVCodec *pCodec = nullptr; 69 | pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id); 70 | if(pCodec == NULL) { 71 | cout << "Unsupported codec!" << endl; 72 | return Constat::system_error; // Codec not found 73 | } 74 | 75 | // Copy context 76 | pCodecCtx = avcodec_alloc_context3(pCodec); 77 | if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { 78 | cout << "Couldn't copy codec context!" << ret << endl; 79 | return Constat::system_error; 80 | } 81 | 82 | // Open codec 83 | if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { 84 | cout << "Could not open codec!" << ret << endl; 85 | return Constat::system_error; 86 | } 87 | 88 | if(pCodecCtx->framerate.den > 0) { 89 | int fps = (int)(pCodecCtx->framerate.num / pCodecCtx->framerate.den); 90 | if(decodeFps == 0) { 91 | decodeFps = fps; 92 | } 93 | skip = fps / decodeFps; 94 | } 95 | 96 | // Allocate video frame 97 | pFrame=av_frame_alloc(); 98 | 99 | // Allocate an AVFrame structure 100 | pFrameRGB=av_frame_alloc(); 101 | 102 | // Allocate an AVFrame structure 103 | pFrameOut=av_frame_alloc(); 104 | 105 | // Determine required buffer size and allocate buffer 106 | imgWidth = pCodecCtx->width; 107 | imgHeight = pCodecCtx->height; 108 | imgSize = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 109 | buffer = (uint8_t *)av_malloc(imgSize * sizeof(uint8_t)); 110 | 111 | // Assign appropriate parts of buffer to image planes in pFrameRGB 112 | // Note that pFrameRGB is an AVFrame, but AVFrame is a superset 113 | // of AVPicture 114 | avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, imgWidth, imgHeight); 115 | 116 | // initialize SWS context for software scaling 117 | sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, 118 | pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 119 | AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, nullptr, nullptr, 
nullptr); 120 | 121 | cout << "open stream succ, source: " << source << ", find video stream idx: " << videoStream 122 | << ", width: " << imgWidth << ", height: " << imgHeight << ", size: " << imgSize << ", skip: " << skip 123 | << ", decodeCount: " << decodeCount << endl; 124 | return Constat::ok; 125 | } 126 | 127 | int Decoder::initEncode(std::string desc) { 128 | 129 | int ret; 130 | if((ret = avformat_alloc_output_context2(&outFormatCtx, NULL, "rtsp", desc.c_str())) < 0) { 131 | cout << "avformat_alloc_output_context2 failed, ret: " << ret << endl; 132 | return Constat::system_error; 133 | } 134 | 135 | outFormat = outFormatCtx->oformat; 136 | for (int i = 0; i < pFormatCtx->nb_streams; i++) { 137 | 138 | //Create output AVStream according to input AVStream 139 | AVStream *in_stream = pFormatCtx->streams[i]; 140 | AVStream *out_stream = avformat_new_stream(outFormatCtx, in_stream->codec->codec); 141 | if (!out_stream) { 142 | cout << "Failed allocating output stream " << endl; 143 | return Constat::system_error; 144 | } 145 | 146 | //Copy the settings of AVCodecContext 147 | ret = avcodec_copy_context(out_stream->codec, in_stream->codec); 148 | if (in_stream->codec->codec_id == AV_CODEC_ID_NONE) { 149 | out_stream->codec->codec_id = AV_CODEC_ID_AAC; 150 | } 151 | 152 | if (ret < 0) { 153 | cout << "Failed to copy context from input to output stream codec context!" 
<< endl; 154 | return Constat::system_error; 155 | } 156 | 157 | out_stream->codec->codec_tag = 0; 158 | if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER) 159 | out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; 160 | } 161 | 162 | //Dump Format------------------ 163 | av_dump_format(outFormatCtx, 0, desc.c_str(), 1); 164 | 165 | if (!(outFormat->flags & AVFMT_NOFILE)) { 166 | ret = avio_open(&outFormatCtx->pb, desc.c_str(), AVIO_FLAG_WRITE); 167 | if (ret < 0) { 168 | cout << "Could not open output URL: " << desc << endl; 169 | return ret; 170 | } 171 | } 172 | 173 | av_dict_set(&dict, "rtsp_transport","tcp",0); 174 | av_dict_set(&dict, "muxdelay", "0.1", 0); 175 | 176 | outFormatCtx->audio_codec_id = outFormatCtx->oformat->audio_codec; 177 | outFormatCtx->video_codec_id = outFormatCtx->oformat->video_codec; 178 | ret = avformat_write_header(outFormatCtx, &dict); 179 | if (ret < 0) { 180 | cout << "Error occurred when opening output URL: " << desc << endl; 181 | return ret; 182 | } 183 | 184 | cout << "init encode succ !!!" << endl; 185 | 186 | return Constat::ok; 187 | } 188 | 189 | int Decoder::initFilter() { 190 | 191 | avfilter_register_all(); 192 | 193 | char args[512]; 194 | AVFilter *buffersrc = avfilter_get_by_name("buffer"); 195 | AVFilter *buffersink = avfilter_get_by_name("buffersink"); 196 | AVFilterInOut *outputs = avfilter_inout_alloc(); 197 | AVFilterInOut *inputs = avfilter_inout_alloc(); 198 | enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; 199 | 200 | /* buffer video source: the decoded frames from the decoder will be inserted here. 
*/ 201 | filterGraph = avfilter_graph_alloc(); 202 | snprintf(args, sizeof(args), 203 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 204 | pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 205 | pCodecCtx->time_base.num, pCodecCtx->time_base.den, 206 | pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den); 207 | 208 | int ret = avfilter_graph_create_filter(&buffersrcCtx, buffersrc, "in", args, NULL, filterGraph); 209 | if (ret < 0) { 210 | cout << "Cannot create buffer source!" << endl; 211 | return ret; 212 | } 213 | 214 | /* buffer video sink: to terminate the filter chain. */ 215 | AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); 216 | buffersink_params->pixel_fmts = pix_fmts; 217 | ret = avfilter_graph_create_filter(&buffersinkCtx, buffersink, "out", NULL, buffersink_params, filterGraph); 218 | av_free(buffersink_params); 219 | if (ret < 0) { 220 | cout << "Cannot create buffer sink!" << endl; 221 | return ret; 222 | } 223 | 224 | /* Endpoints for the filter graph. 
*/ 225 | outputs->name = av_strdup("in"); 226 | outputs->filter_ctx = buffersrcCtx; 227 | outputs->pad_idx = 0; 228 | outputs->next = NULL; 229 | 230 | inputs->name = av_strdup("out"); 231 | inputs->filter_ctx = buffersinkCtx; 232 | inputs->pad_idx = 0; 233 | inputs->next = NULL; 234 | 235 | if ((ret = avfilter_graph_parse_ptr(filterGraph, filtersDesc, &inputs, &outputs, NULL)) < 0){ 236 | cout << "avfilter_graph_parse_ptr failed, ret: " << ret << endl; 237 | return ret; 238 | } 239 | 240 | if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0) { 241 | cout << "avfilter_graph_config failed, ret: " << ret << endl; 242 | return ret; 243 | } 244 | 245 | filterCtx = filterGraph->filters[2]; 246 | return Constat::ok; 247 | } 248 | 249 | int Decoder::findVideoStreamIndex() { 250 | for (size_t i = 0; i < pFormatCtx->nb_streams; i++) { 251 | if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 252 | videoStream = i; 253 | return 0; 254 | } 255 | } 256 | return Constat::system_error; 257 | } 258 | 259 | 260 | int Decoder::decoding() { 261 | 262 | AVPacket packet; 263 | av_init_packet(&packet); 264 | 265 | start_time = av_gettime(); 266 | while(1) { 267 | 268 | AVStream *in_stream, *out_stream; 269 | 270 | int ret = av_read_frame(pFormatCtx, &packet); 271 | if (ret != 0) { 272 | if (ret == AVERROR_EOF) { 273 | cout << "av_read_frame ret eof, ret: " << ret << endl; 274 | } else { 275 | cout << "av_read_frame failed, ret: " << ret
<< endl; 276 | } 277 | break; 278 | } 279 | 280 | //FIX:No PTS (Example: Raw H.264) 281 | //如果容器没有提供pts/dts,则需要填充它们 282 | if (packet.pts == AV_NOPTS_VALUE) { 283 | AVRational time_base = pFormatCtx->streams[videoStream]->time_base; 284 | //Duration between 2 frames (us) 285 | int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate); 286 | //Parameters 287 | packet.pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base)*AV_TIME_BASE); 288 | packet.dts = packet.pts; 289 | packet.duration = (double)calc_duration / (double)(av_q2d(time_base)*AV_TIME_BASE); 290 | } 291 | 292 | //Important:Delay 延迟 293 | // if (packet.stream_index == videoStream) { 294 | // AVRational time_base = pFormatCtx->streams[videoStream]->time_base; 295 | // AVRational time_base_q = { 1, AV_TIME_BASE }; 296 | // int64_t pts_time = av_rescale_q(packet.dts, time_base, time_base_q); 297 | // int64_t now_time = av_gettime() - start_time; 298 | // if (pts_time > now_time) { 299 | // av_usleep(pts_time - now_time); 300 | // } 301 | // } 302 | 303 | if (packet.stream_index != videoStream) { 304 | av_packet_unref(&packet); 305 | continue; 306 | } 307 | 308 | in_stream = pFormatCtx->streams[packet.stream_index]; 309 | out_stream = outFormatCtx->streams[packet.stream_index]; 310 | 311 | /* copy packet */ 312 | packet.pos = -1; 313 | packet.pts = av_rescale_q_rnd(packet.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); 314 | packet.dts = av_rescale_q_rnd(packet.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); 315 | packet.duration = av_rescale_q(packet.duration, in_stream->time_base, out_stream->time_base); 316 | if (packet.stream_index == videoStream) { 317 | frame_index++; 318 | } 319 | 320 | ret = av_interleaved_write_frame(outFormatCtx, &packet); 321 | if (ret < 0) { 322 | cout << "error muxing packet, ret:" << ret << endl; 323 | 
break; 324 | } 325 | 326 | av_packet_unref(&packet); 327 | } 328 | 329 | //Write file trailer 330 | av_write_trailer(outFormatCtx); 331 | 332 | return Constat::ok; 333 | } 334 | 335 | void Decoder::saveImage(std::string filename) { 336 | cv::Mat mat; 337 | mat.create(cv::Size(imgWidth, imgHeight), CV_8UC3); 338 | mat.data = buffer; 339 | if (!cv::imwrite(filename, mat)) { 340 | cout<< "saveImage failed, filename: " << filename << ", pts: " << pts << endl; 341 | } else { 342 | cout<< "saveImage success, filename: " << filename << ", pts: " << pts << endl; 343 | } 344 | } 345 | 346 | std::string Decoder::getImageName() { 347 | time_t ts = time(NULL); 348 | std::stringstream ss; 349 | ss << "/tmp/"; 350 | ss << decodeId; 351 | ss << "_"; 352 | ss << ts; 353 | ss << "_"; 354 | ss << decodeFrameNum; 355 | ss << "."; 356 | ss << "jpg"; 357 | return ss.str(); 358 | } 359 | 360 | void Decoder::closeDecode() { 361 | 362 | avfilter_graph_free(&filterGraph); 363 | 364 | av_frame_free(&pFrameOut); 365 | av_frame_free(&pFrameRGB); 366 | av_frame_free(&pFrame); 367 | 368 | avcodec_close(pCodecCtx); 369 | avcodec_free_context(&pCodecCtx); 370 | 371 | avcodec_close(pCodecCtxOrig); 372 | //avcodec_free_context(&pCodecCtxOrig); //Mustn't free 373 | 374 | //avfilter_free(filterCtx); 375 | 376 | avformat_close_input(&pFormatCtx); 377 | av_dict_free(&pAvDict); 378 | } -------------------------------------------------------------------------------- /ffmpeg-video-transcoder/src/decoder.h: -------------------------------------------------------------------------------- 1 | #ifndef HELLO_DECODE_DECODE_H 2 | #define HELLO_DECODE_DECODE_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #ifdef __cplusplus 19 | } 20 | #endif 21 | 22 | #include 23 | #include 24 | 25 | class Decoder { 26 | 27 | public: 28 | void decode(std::string, 
std::string); 29 | 30 | private: 31 | int initDecode(std::string); 32 | int initEncode(std::string); 33 | int initFilter(); 34 | int findVideoStreamIndex(); 35 | int decoding(); 36 | std::string getImageName(); 37 | void saveImage(std::string); 38 | void closeDecode(); 39 | 40 | private: 41 | 42 | int videoStream = -1; 43 | AVDictionary *pAvDict = nullptr; 44 | AVFormatContext *pFormatCtx = nullptr; 45 | AVCodecContext *pCodecCtxOrig = nullptr; 46 | AVCodecContext *pCodecCtx = nullptr; 47 | SwsContext *sws_ctx = nullptr; 48 | 49 | AVDictionary *dict = nullptr; 50 | AVFormatContext *outFormatCtx = nullptr; 51 | AVOutputFormat *outFormat = nullptr; 52 | int frame_index = 0; 53 | int64_t start_time = 0; 54 | 55 | AVFrame *pFrame = nullptr; 56 | AVFrame *pFrameRGB = nullptr; 57 | AVFrame *pFrameOut = nullptr; 58 | uint8_t *buffer = nullptr; 59 | 60 | int imgWidth = 0; 61 | int imgHeight = 0; 62 | int imgSize = 0; 63 | int decodeFps = 1; 64 | int skip = 0; 65 | int pts = 0; 66 | 67 | int decodeId = 0; 68 | int decodeCount = 0; 69 | int decodeFrameNum = 0; 70 | 71 | AVFilterGraph *filterGraph; 72 | AVFilterContext *buffersrcCtx; 73 | AVFilterContext *buffersinkCtx; 74 | AVFilterContext *filterCtx; 75 | 76 | char *filtersDesc = "drawtext=fontfile=/usr/share/fonts/dejavu/DejaVuSans.ttf:fontcolor=blue:x=300:y=300:fontsize=45:text='hello avfilter'"; 77 | 78 | }; 79 | 80 | enum Constat { 81 | ok = 0, 82 | system_error = -1 83 | }; 84 | 85 | #endif //HELLO_DECODE_DECODE_H 86 | -------------------------------------------------------------------------------- /ffmpeg-video-transcoder/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "decoder.h" 3 | 4 | using namespace std; 5 | 6 | /** 7 | * ./pusher_decoder http://......... rtsp://localhost:554/... 
8 | * 9 | * @param argc 10 | * @param argv 11 | * @return 12 | */ 13 | int main(int argc, char** argv) { 14 | 15 | if (argc <= 2) { 16 | cout << "please input valid arguments." << endl; 17 | return 0; 18 | } 19 | 20 | Decoder d; 21 | d.decode(argv[1], argv[2]); 22 | return 0; 23 | } 24 | -------------------------------------------------------------------------------- /ffmpeg-video-transcoder/src/time_util.h: -------------------------------------------------------------------------------- 1 | #ifndef FFMPEG_VIDEO_AVFILTER_TIME_UTIL_H 2 | #define FFMPEG_VIDEO_AVFILTER_TIME_UTIL_H 3 | 4 | #include 5 | #include 6 | using namespace std; 7 | 8 | string getFormatTime() 9 | { 10 | time_t now_time; 11 | char buf[64] = {0}; 12 | now_time=time(NULL); 13 | strftime(buf, sizeof(buf),"%Y-%m-%d %H:%M:%S", localtime(&now_time)); 14 | return buf; 15 | } 16 | 17 | #endif --------------------------------------------------------------------------------