├── CMakeLists.txt ├── README.md ├── example ├── CMakeLists.txt ├── avformat │ ├── AVFormatInput.cpp │ ├── AVStream.cpp │ └── CMakeLists.txt ├── avutil │ ├── AVAudioFifo.cpp │ ├── AVBuffer.cpp │ ├── AVDictionary.cpp │ ├── AVFifoBuffer.cpp │ ├── AVFrame.cpp │ ├── AVLog.cpp │ ├── AVMath.cpp │ ├── AVOption.cpp │ ├── AVRational.cpp │ ├── AVSample.cpp │ ├── CMakeLists.txt │ └── avutil_common.h └── example.cpp ├── installFFmpeg.sh └── src ├── audio_convert_tool.h ├── audio_filter_aformat_output_pcm.h ├── audio_filter_aresample_output_pcm.h ├── av_util_avclass_avoption_test.h ├── av_util_dictionary_test.h ├── avframe_util.cpp ├── avframe_util.h ├── codecimpl.cpp ├── codecimpl.h ├── cut_mp4_test.h ├── decode_audio_mix_output_pcm.test.h ├── decode_audio_output_pcm_test.h ├── decode_h264_test.h ├── decode_h265_test.h ├── decode_heic_output_yuv420_test.h ├── decode_video_output_one_image.h ├── decode_video_output_yuv420_test.h ├── encode_frames_yuv420p_output_heif.h ├── encode_oneframe_yuv420p10le_output_heif.h ├── encode_oneframe_yuv420p_output_heif.h ├── encode_video_output_10bith265_test.h ├── encode_video_output_h264_test.h ├── encode_video_output_h265_test.h ├── encode_video_output_mp4_test.h ├── exec_ffmpeg_test.h ├── ffmpeg_test_main.cpp ├── generate_gif_test.h ├── global.h ├── merge_2mp4_output_mp4.h ├── merge_image_test.h ├── merge_yuv420_test.h ├── pw_truecut_hdr.h ├── pw_truecut_yuv_helper.h ├── remuxing_test.h ├── resample_audio_test.h ├── separate_mp4_output_audio_video_mp4.h ├── truecut_tcif.cpp ├── truecut_tcif.h ├── video_avfilter_test.h ├── video_filter_tool.h └── yuv_transfer_test.h /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #CMake version 2 | CMAKE_MINIMUM_REQUIRED (VERSION 2.8.5) 3 | 4 | #project information 5 | PROJECT(FFMPEG) 6 | 7 | if(UNIX) 8 | SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -std=c++14 -fPIC") 9 | endif() 10 | 11 | SET(PRONAME ffmpeg_test) 12 | SET(LIBTCIFNAME truecut_tcif) 13 | 
14 | #target 15 | ADD_EXECUTABLE(${PRONAME} src/ffmpeg_test_main.cpp 16 | src/avframe_util.cpp 17 | src/codecimpl.cpp) 18 | 19 | ADD_LIBRARY(${LIBTCIFNAME} src/truecut_tcif.cpp) 20 | 21 | SET(FFMPEG_INSTALL /home/caiyu/install) 22 | SET(X264_INSTALL /home/caiyu/install) 23 | SET(X265_INSTALL /home/caiyu/install) 24 | SET(LAME_INSTALL /home/caiyu/install) 25 | SET(HEIF_ROOT /home/caiyu/source/heif) 26 | SET(EXIV2_INSTALL /home/caiyu/install) 27 | 28 | INCLUDE_DIRECTORIES(${FFMPEG_INSTALL}/include 29 | ${HEIF_ROOT}/srcs/api/common 30 | ${HEIF_ROOT}/srcs/api/reader 31 | ${HEIF_ROOT}/srcs/api/writer 32 | ${HEIF_ROOT}/srcs/api-cpp 33 | ${EXIV2_INSTALL}/include 34 | ) 35 | 36 | find_library(AVFILTER_PATH avfilter ${FFMPEG_INSTALL}/lib) 37 | find_library(AVFORMAT_PATH avformat ${FFMPEG_INSTALL}/lib) 38 | find_library(POSTPROC_PATH postproc ${FFMPEG_INSTALL}/lib) 39 | find_library(SWSCALE_PATH swscale ${FFMPEG_INSTALL}/lib) 40 | find_library(SWRESAMPLE_PATH swresample ${FFMPEG_INSTALL}/lib) 41 | find_library(AVCODEC_PATH avcodec ${FFMPEG_INSTALL}/lib) 42 | find_library(AVUTIL_PATH avutil ${FFMPEG_INSTALL}/lib) 43 | find_library(X264_PATH x264 ${X264_INSTALL}/lib) 44 | find_library(X265_PATH x265 ${X265_INSTALL}/lib) 45 | find_library(LAME_PATH mp3lame ${LAME_INSTALL}/lib) 46 | find_library(HEIF_READER heif_static ${HEIF_ROOT}/build/lib) 47 | find_library(HEIF_WRITER heif_writer_static ${HEIF_ROOT}/build/lib) 48 | find_library(HEIFPP heifpp ${HEIF_ROOT}/build/lib) 49 | find_library(EXIV2 exiv2 ${EXIV2_INSTALL}/lib) 50 | 51 | #add_library(x265 52 | # STATIC 53 | # IMPORTED) 54 | #set_target_properties(x265 55 | # PROPERTIES IMPORTED_LOCATION 56 | # ${X265_INSTALL}/lib/libx265.so) 57 | 58 | 59 | SET(LIBS ${AVFILTER_PATH} 60 | ${AVFORMAT_PATH} 61 | ${POSTPROC_PATH} 62 | ${SWSCALE_PATH} 63 | ${AVCODEC_PATH} 64 | ${SWRESAMPLE_PATH} 65 | ${AVUTIL_PATH} 66 | z 67 | lzma 68 | stdc++ 69 | m 70 | rt 71 | ${X264_PATH} 72 | ${X265_PATH} 73 | dl 74 | numa 75 | ${EXIV2} 76 | ${LAME_PATH} 
77 | ${LIBTCIFNAME} 78 | ${HEIFPP} 79 | ${HEIF_READER} 80 | ${HEIF_WRITER} 81 | pthread) 82 | 83 | #link lib 84 | TARGET_LINK_LIBRARIES(${PRONAME} ${LIBS}) 85 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ffmpeg_test 2 | 学习ffmpeg各种API,并简单编写相应demo以供参考 3 | -------------------------------------------------------------------------------- /example/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | ADD_SUBDIRECTORY(avutil) 2 | ADD_SUBDIRECTORY(avformat) -------------------------------------------------------------------------------- /example/avformat/AVFormatInput.cpp: -------------------------------------------------------------------------------- 1 | //此例学习打开文件(demux),以及获取文件中相关流信息,结构体AVFormatContext中的AVInputFormat跟demux直接相关,如果要自己写解析文件的部分就是需要实现相关的读取函数 2 | //avformat_open_input 打开文件函数 3 | //avformat_close_input 关闭文件函数 4 | //avformat_find_stream_info, 查找所有流类型 5 | //av_find_best_stream, 查找对应的流类型 6 | 7 | extern "C"{ 8 | #include 9 | } 10 | #include 11 | 12 | void AVFormatInput_Example(){ 13 | const char* fileName = "/home/caiyu/VID_20210703_171254.mp4"; 14 | AVFormatContext* avFmtCtx = NULL; 15 | int videoIndex = -1; 16 | int audioIndex = -1; 17 | int ret = -1; 18 | AVPacket* avpkt = av_packet_alloc(); 19 | 20 | if(avformat_open_input(&avFmtCtx, fileName, NULL, NULL) < 0){ 21 | goto end; 22 | } 23 | //这个函数必须先执行才能av_find_best_stream 24 | avformat_find_stream_info(avFmtCtx, NULL); 25 | 26 | std::cout << "此媒体文件有:" << avFmtCtx->nb_streams << "条AVStream \n"; 27 | 28 | videoIndex = av_find_best_stream(avFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); 29 | 30 | audioIndex = av_find_best_stream(avFmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); 31 | 32 | std::cout << "视频streamIndex:" << videoIndex << ",音频streamIndex:" << audioIndex << std::endl; 33 | 34 | 35 | while(!(ret = av_read_frame(avFmtCtx, 
avpkt))){ 36 | if(avpkt->stream_index == videoIndex){ 37 | std::cout << "读取到视频包, pts = " << avpkt->pts << std::endl; 38 | }else if(avpkt->stream_index == audioIndex){ 39 | std::cout << "读取到音频包, pts = " << avpkt->pts << std::endl; 40 | }else{ 41 | std::cout << "读取到其他数据包, streamIndex =" << avpkt->stream_index <<", pts = " << avpkt->pts << std::endl; 42 | } 43 | } 44 | end: 45 | if(avFmtCtx != NULL){ 46 | avformat_close_input(&avFmtCtx); 47 | } 48 | av_packet_free(&avpkt); 49 | } -------------------------------------------------------------------------------- /example/avformat/AVStream.cpp: -------------------------------------------------------------------------------- 1 | //此例主要学会AVStream结构体以及相关api, AVStream中最重要的变量是AVCodecParamter记录流媒体的相关编码信息 2 | //AVStream在mux下有创建函数avformat_new_stream 3 | 4 | extern "C"{ 5 | #include 6 | } 7 | #include 8 | 9 | void AVStream_Example(){ 10 | const char* ifilename = "/home/caiyu/VID_20210703_171254.mp4"; 11 | const char* ovfilename = "/home/caiyu/VID_20210703_171254_only_video.mp4"; 12 | const char* oafilename = "/home/caiyu/VID_20210703_171254_only_audio.mp4"; 13 | AVFormatContext* ifmt_ctx = NULL;//必须要赋值为NULL,否则avformat_open_input失败 14 | AVFormatContext* ovfmt_ctx = NULL; 15 | AVFormatContext* oafmt_ctx = NULL; 16 | int videoIndex = -1; 17 | int audioIndex = -1; 18 | AVStream* ov_stream = NULL; 19 | AVStream* oa_stream = NULL; 20 | int ret = 0; 21 | AVPacket pkt; 22 | if(avformat_open_input(&ifmt_ctx, ifilename, NULL, NULL) < 0){ 23 | goto end; 24 | } 25 | avformat_find_stream_info(ifmt_ctx, NULL); 26 | 27 | for(int i=0; inb_streams; i++){ 28 | AVStream* s = ifmt_ctx->streams[i]; 29 | if(s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){ 30 | videoIndex = i; 31 | std::cout << "视频宽:" << s->codecpar->width << ",视频高:" << s->codecpar->height << ",视频码率" << s->codecpar->bit_rate << std::endl; 32 | }else if(s->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){ 33 | audioIndex = i; 34 | std::cout << "音频采样率:" << s->codecpar->sample_rate << "音频类型" << 
s->codecpar->format << "声道数:" << s->codecpar->channels << std::endl; 35 | } 36 | } 37 | 38 | if(avformat_alloc_output_context2(&ovfmt_ctx, NULL, NULL, ovfilename) < 0){ 39 | goto end; 40 | } 41 | 42 | ov_stream = avformat_new_stream(ovfmt_ctx, NULL); 43 | 44 | avcodec_parameters_copy(ov_stream->codecpar, ifmt_ctx->streams[videoIndex]->codecpar); 45 | ov_stream->codecpar->codec_tag = 0; 46 | 47 | if(!ovfmt_ctx->pb && !(ovfmt_ctx->flags & AVFMT_NOFILE)){ 48 | avio_open(&ovfmt_ctx->pb, ovfilename, AVIO_FLAG_WRITE); 49 | } 50 | 51 | avformat_write_header(ovfmt_ctx, NULL); 52 | 53 | 54 | if(avformat_alloc_output_context2(&oafmt_ctx, NULL, NULL, oafilename) < 0){ 55 | goto end; 56 | } 57 | 58 | oa_stream = avformat_new_stream(oafmt_ctx, NULL); 59 | avcodec_parameters_copy(oa_stream->codecpar, ifmt_ctx->streams[audioIndex]->codecpar); 60 | oa_stream->codecpar->codec_tag = 0; 61 | 62 | if(!oafmt_ctx->pb && !(oafmt_ctx->flags & AVFMT_NOFILE)){ 63 | ret = avio_open(&oafmt_ctx->pb, oafilename, AVIO_FLAG_WRITE); 64 | if(ret < 0){ 65 | goto end; 66 | } 67 | } 68 | 69 | if(avformat_write_header(oafmt_ctx, NULL) < 0){ 70 | goto end; 71 | } 72 | 73 | while(1){ 74 | ret = av_read_frame(ifmt_ctx, &pkt); 75 | if(ret < 0){ 76 | break; 77 | } 78 | if(pkt.stream_index == videoIndex){ 79 | pkt.stream_index = ov_stream->index; 80 | ret = av_interleaved_write_frame(ovfmt_ctx, &pkt); 81 | }else if(pkt.stream_index == audioIndex){ 82 | //此处是个坑,如果不对index重新赋值,就会在出现错误 83 | // if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) { 84 | // av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n", 85 | // pkt->stream_index); 86 | // return AVERROR(EINVAL); 87 | // } 88 | pkt.stream_index = oa_stream->index; 89 | ret = av_interleaved_write_frame(oafmt_ctx, &pkt); 90 | } 91 | av_packet_unref(&pkt); 92 | 93 | if(ret < 0){ 94 | goto end; 95 | } 96 | } 97 | 98 | av_write_trailer(ovfmt_ctx); 99 | 100 | av_write_trailer(oafmt_ctx); 101 | 102 | end: 103 | if(ifmt_ctx != NULL){ 104 | 
avformat_close_input(&ifmt_ctx); 105 | } 106 | if(ovfmt_ctx){ 107 | if(ovfmt_ctx->pb && !(ovfmt_ctx->flags & AVFMT_NOFILE)){ 108 | avio_closep(&ovfmt_ctx->pb); 109 | } 110 | avformat_free_context(ovfmt_ctx); 111 | ovfmt_ctx = NULL; 112 | } 113 | if(oafmt_ctx != NULL){ 114 | if(oafmt_ctx->pb){ 115 | avio_closep(&oafmt_ctx->pb); 116 | } 117 | avformat_free_context(oafmt_ctx); 118 | oafmt_ctx = NULL; 119 | } 120 | } -------------------------------------------------------------------------------- /example/avformat/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #source 2 | AUX_SOURCE_DIRECTORY(. DIR_LIB_SRCS) 3 | #生成静态库 4 | ADD_LIBRARY(avformat_example ${DIR_LIB_SRCS}) 5 | -------------------------------------------------------------------------------- /example/avutil/AVAudioFifo.cpp: -------------------------------------------------------------------------------- 1 | //AVAudioFifo相关的api是实现音频解码的数据结构,是基于AVFifoBuffer实现的二次封装 2 | // struct AVAudioFifo { 3 | // AVFifoBuffer **buf; /**< single buffer for interleaved, per-channel buffers for planar */ 4 | // int nb_buffers; /**< number of buffers */ 5 | // int nb_samples; /**< number of samples currently in the FIFO */ 6 | // int allocated_samples; /**< current allocated size, in samples */ 7 | 8 | // int channels; /**< number of channels */ 9 | // enum AVSampleFormat sample_fmt; /**< sample format */ 10 | // int sample_size; /**< size, in bytes, of one sample in a buffer */ 11 | // }; 12 | 13 | //av_audio_fifo_alloc初始化函数, av_audio_fifo_realloc再次分配函数, av_audio_fifo_free释放函数 14 | //av_audio_fifo_read读取数据函数, av_audio_fifo_write写入数据函数 15 | //av_audio_fifo_peek/peek_at 仅读取数据不丢弃数据 16 | //av_audio_fifo_drain 丢弃数据函数 17 | //av_audio_fifo_space 可存储samples大小, av_audio_fifo_size当前存储samples大小 18 | 19 | extern "C"{ 20 | #include 21 | #include 22 | } 23 | #include 24 | 25 | void AVAudioFifo_Example(){ 26 | AVAudioFifo* av_audio_fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 10240); 27 
| std::cout << "av_audio_fifo_alloc: " << av_audio_fifo_space(av_audio_fifo) << std::endl; 28 | av_audio_fifo_realloc(av_audio_fifo, 20480); 29 | std::cout << "av_audio_fifo_realloc: " << av_audio_fifo_space(av_audio_fifo) << std::endl; 30 | 31 | int channels = 2; 32 | int samples = 1024; 33 | 34 | int size = av_samples_get_buffer_size(NULL, channels, samples, AV_SAMPLE_FMT_S16, 0); 35 | int planes = av_sample_fmt_is_planar(AV_SAMPLE_FMT_S16) ? channels : 1; 36 | uint8_t** buffer = (uint8_t**)av_mallocz_array(planes, sizeof(uint8_t*)); 37 | for(int i=0; i 8 | #include 9 | } 10 | 11 | #include 12 | #include 13 | 14 | static void free_avbuffer(void* opaque, uint8_t* data){ 15 | AVBufferRef* avbuffer = (AVBufferRef*)opaque; 16 | std::cout << (char*)avbuffer->data << "," << avbuffer->size << std::endl; 17 | av_buffer_unref(&avbuffer); 18 | av_freep(&data); 19 | av_freep(&avbuffer); 20 | } 21 | 22 | void AVBuffer_Example(){ 23 | AVBufferRef* avbuffer = av_buffer_allocz(100); 24 | if(av_buffer_is_writable(avbuffer)){ 25 | const char* s = "test avbuffer"; 26 | strcpy((char*)avbuffer->data, s); 27 | avbuffer->size = strlen(s); 28 | } 29 | 30 | uint8_t* data = (uint8_t*)av_mallocz(100); 31 | AVBufferRef* avbuffer2 = av_buffer_create(data, 100, free_avbuffer, avbuffer, 0); 32 | av_buffer_unref(&avbuffer2); 33 | 34 | AVBufferPool* avbufferPool = av_buffer_pool_init(100, NULL); 35 | 36 | AVBufferRef* avbuffer3 = av_buffer_pool_get(avbufferPool); 37 | std::cout << "avbuffer3->size = "<< avbuffer3->size << std::endl; 38 | 39 | av_buffer_pool_uninit(&avbufferPool); 40 | } -------------------------------------------------------------------------------- /example/avutil/AVDictionary.cpp: -------------------------------------------------------------------------------- 1 | //此例是为了学会AVDictionary结构体赋值,获取值,以及利用它对AVClass对象赋值 2 | //主要学会av_dict_set_xxx、av_dict_get_xxx、av_dict_copy、av_dict_count等函数 3 | //学会使用av_opt_set_dict2函数赋值 4 | #include "avutil_common.h" 5 | #include 6 | 7 | extern "C"{ 
8 | #include 9 | } 10 | 11 | const AVOption stuInfoAVOption[] = { 12 | {.name = "stuName", .help = "姓名", .offset = offsetof(struct StuInfo, _stuName), .type = AV_OPT_TYPE_STRING, {.str = "张xx"}, .min = 0.0, .max = 0.0, .flags = 0, .unit = NULL}, 13 | {"stuNumber", "学号", offsetof(struct StuInfo, _stuNo), AV_OPT_TYPE_UINT64, {.i64 = 100000}, 0.0, 999999999.0, 0, NULL}, 14 | {"classInfo", "班级", offsetof(struct StuInfo, _classInfo), AV_OPT_TYPE_STRING, {.str = "张江xx学校"}, 0.0, 0.0, 0}, 15 | {NULL} 16 | }; 17 | 18 | static AVClass stuInfo_avcls = { 19 | .class_name = "AVOption stuInfo example", 20 | .item_name = av_default_item_name, //这个函数实质上就是获取上边的class_name 21 | .option = stuInfoAVOption, //默认参数 22 | .version = LIBAVUTIL_VERSION_INT 23 | }; 24 | 25 | 26 | const AVOption scoreInfoAVOption[] = { 27 | {"chinese", "语文成绩", offsetof(struct ScoreInfo, _chinese), AV_OPT_TYPE_FLOAT, {.dbl = 60.0}, 0.0, 150.0, 0, NULL}, 28 | {"math", "数学成绩", offsetof(struct ScoreInfo, _math), AV_OPT_TYPE_FLOAT, {.dbl = 60.0}, 0.0, 150.0, 0}, 29 | {"english", "英语成绩", offsetof(struct ScoreInfo, _english), AV_OPT_TYPE_FLOAT, {.dbl = 60.0}, 0.0, 150.0, 0}, 30 | {NULL} //此处必须以{NULL}结束,否则会出现赋值错误问题 31 | }; 32 | 33 | 34 | static void* score_info_next(void *obj, void *prev){ 35 | ScoreInfo* scoreInfo = (ScoreInfo*)obj; 36 | if(!prev && scoreInfo->opaque){ 37 | return scoreInfo->opaque; 38 | } 39 | return NULL; 40 | } 41 | 42 | static const AVClass* socre_info_child_class_iterate(void **iter){ 43 | const AVClass *c = *iter ? 
NULL : &stuInfo_avcls; 44 | *iter = (void*)(uintptr_t)c; 45 | return c; 46 | } 47 | 48 | //C++初始化AVClass成员变量不能乱序 49 | static AVClass scoreInfo_avcls = { 50 | .class_name = "AVOption scoreInfo example", 51 | .item_name = av_default_item_name, //这个函数实质上就是获取上边的class_name 52 | .option = scoreInfoAVOption, //默认参数 53 | .version = LIBAVUTIL_VERSION_INT, 54 | .log_level_offset_offset = 0, 55 | .parent_log_context_offset = 0, 56 | .child_next = score_info_next, 57 | .category = AV_CLASS_CATEGORY_NA, 58 | .get_category = av_default_get_category, 59 | .query_ranges = av_opt_query_ranges_default, 60 | .child_class_iterate = socre_info_child_class_iterate 61 | }; 62 | 63 | void printDict(AVDictionary* dict){ 64 | AVDictionaryEntry* entry = NULL; 65 | //key不能赋值为NULL 66 | while((entry = av_dict_get(dict, "", entry, AV_DICT_IGNORE_SUFFIX)) != NULL){ 67 | std::cout << "key:" << entry->key << " value:" << entry->value << std::endl; 68 | } 69 | } 70 | 71 | void AVDictionary_Example(){ 72 | AVDictionary *dict = NULL; 73 | av_dict_set(&dict, "chinese", "120", 0); 74 | av_dict_set(&dict, "math", "100.5", 0); 75 | av_dict_set(&dict, "english", "130", 0); 76 | av_dict_set(&dict, "physical", "89", 0); 77 | 78 | std::cout << "dict count:" << av_dict_count(dict); 79 | 80 | AVDictionary* scoreInfoDict = NULL; 81 | av_dict_copy(&scoreInfoDict, dict, 0); 82 | 83 | av_dict_free(&dict); 84 | 85 | printDict(scoreInfoDict); 86 | 87 | AVDictionary* stuInfoDict = NULL; 88 | av_dict_set(&stuInfoDict, "stuName", "小菜菜", 0); 89 | av_dict_set(&stuInfoDict, "stuNumber", "1238888", 0); 90 | av_dict_set(&stuInfoDict, "classInfo", "三年级9班", 0); 91 | av_dict_set(&stuInfoDict, "fakeInfo", "xxxxx", 0); 92 | 93 | ScoreInfo* scoreInfo = (ScoreInfo*)av_mallocz(sizeof(ScoreInfo)); 94 | scoreInfo->_cls = &scoreInfo_avcls; 95 | StuInfo* stuInfo = (StuInfo*)av_mallocz(sizeof(StuInfo)); 96 | stuInfo->_cls = &stuInfo_avcls; 97 | scoreInfo->opaque = stuInfo; 98 | //当obj中没有找到对应的属性,对应的dict会返回未赋值的项,且原来的dict被释放 99 | 
av_opt_set_dict(scoreInfo->opaque, &stuInfoDict); 100 | 101 | std::cout << "输出stuInfoDict未赋值的key->value" << std::endl; 102 | printDict(stuInfoDict); 103 | 104 | av_opt_set_dict(scoreInfo, &scoreInfoDict); 105 | std::cout << "输出scoreInfoDict未赋值的key->value" << std::endl; 106 | printDict(scoreInfoDict); 107 | 108 | std::cout << "输出赋值后的信息" << std::endl; 109 | printfScoreInfo(scoreInfo); 110 | 111 | av_opt_free(scoreInfo->opaque); 112 | av_opt_free(scoreInfo); 113 | av_freep(&scoreInfo); 114 | } -------------------------------------------------------------------------------- /example/avutil/AVFifoBuffer.cpp: -------------------------------------------------------------------------------- 1 | //此例主要研究ffmpeg先进先出的数据结构AVFifoBuffer 2 | // typedef struct AVFifoBuffer { 3 | // uint8_t *buffer; 4 | // uint8_t *rptr, *wptr, *end; 5 | // uint32_t rndx, wndx; 6 | // } AVFifoBuffer; 7 | //使用数组来模拟队列, 这个数据结构最重要的是end = buffer + size(buffersize),所以可以通过end - buffer 得到数据区大小 8 | //rndex表示读取位置, wndex表示写入位置, 这个结构体最厉害之处是对这两个变量永远都只有加法操作,一开始我也不理解,万一越界了呢? 9 | 10 | // unsigned char aread = 120; 11 | // unsigned char awrite = 255; 12 | // awrite += 5; //此时awrite变成了4,加1变成0,加2变成1...... 
13 | // unsigned char size = (unsigned char)(awrite - aread); //(unsigned char)(4 - 120) = 140,和目标值相同 14 | 15 | // 4 - 120 = -116 负数的2进制是正数据的反码+1 16 | 17 | // 116的原码: 01110100 18 | // 116的反码: 10001011 19 | // +1所生成的补码:10001100 20 | // 10001100反表示的无符号正数为: 1 * 2^7 + 1 * 2^3 + 1 * 2^2 = 128 + 8 + 4 = 140 21 | 22 | //初始化 av_fifo_alloc/relloc, 释放函数av_fifo_free/freep 23 | //写入函数 av_fifo_generic_write, 读取函数av_fifo_generic_read 24 | //获取缓冲区大小函数 av_fifo_space, 获取缓冲区数据大小av_fifo_size 25 | //另外的读取函数av_fifo_generic_peek_at / av_fifo_generic_peek, 这两个函数也是读取数据,但是读取后并没有丢掉数据rptr还是在原来的位置 26 | //丢掉数据函数av_fifo_drain,最读指针偏移size个位置 27 | 28 | extern "C"{ 29 | #include 30 | } 31 | #include 32 | 33 | void AVFifoBuffer_Example(){ 34 | 35 | AVFifoBuffer* avfifo = av_fifo_alloc(1024); 36 | std::cout << "avfifo: buffer size :" << av_fifo_space(avfifo) << ", data size:" << av_fifo_size(avfifo) << std::endl; 37 | 38 | av_fifo_realloc2(avfifo, 2048); 39 | std::cout << "after realloc avfifo: buffer size :" << av_fifo_space(avfifo) << ", data size:" << av_fifo_size(avfifo) << std::endl; 40 | uint8_t data[] = "1234567890abcdefghijklmn"; 41 | av_fifo_generic_write(avfifo, (void*)data, sizeof(data), NULL); 42 | std::cout << "after write avfifo: buffer size :" << av_fifo_space(avfifo) << ", data size:" << av_fifo_size(avfifo) << std::endl; 43 | uint8_t dest[11] = {'0'}; 44 | av_fifo_generic_read(avfifo, (void*)dest, 10, NULL); 45 | std::cout << "after read avfifo: data size:" << av_fifo_size(avfifo) << ", read data:" << (char*)dest << std::endl; 46 | 47 | av_fifo_generic_peek_at(avfifo, dest, 2, 10, NULL); 48 | std::cout << "after peek at avfifo: data size:" << av_fifo_size(avfifo) << ", read data:" << (char*)dest << std::endl; 49 | 50 | av_fifo_generic_peek(avfifo, dest, 10, NULL); 51 | std::cout << "after peek avfifo: data size:" << av_fifo_size(avfifo) << ", read data:" << (char*)dest << std::endl; 52 | 53 | av_fifo_drain(avfifo, 3); 54 | std::cout << "after drain avfifo: data size:" << 
av_fifo_size(avfifo) << std::endl; 55 | 56 | av_fifo_freep(&avfifo); 57 | 58 | } 59 | 60 | 61 | -------------------------------------------------------------------------------- /example/avutil/AVFrame.cpp: -------------------------------------------------------------------------------- 1 | //AVFrame是存储编码前/编码后的原始音视频数据的关键数据结构 2 | //创建av_frame_alloc, 释放函数av_frame_free 3 | //浅拷贝函数 av_frame_ref 4 | //快速拷贝函数 av_frame_clone = av_frame_alloc + av_frame_ref 5 | //重置数据存储区av_frame_unref 6 | //移动数据区av_frame_move_ref 7 | //根据avframe中的宽高,samples等信息分配具体数据存储区av_frame_get_buffer 8 | //判断数据区是否可写av_frame_is_writable,和修改可写状态函数av_frame_make_writable 9 | //创建framesidedata函数av_frame_new_side_data/av_frame_new_side_data_from_buf 10 | //获取framesidedata函数av_frame_get_side_data 11 | //移除framesidedata函数av_frame_remove_side_data 12 | 13 | 14 | 15 | extern "C"{ 16 | #include 17 | #include 18 | } 19 | #include 20 | 21 | void AVFrame_Example(){ 22 | AVFrame* avframe = av_frame_alloc(); 23 | //视频编码 24 | { 25 | avframe->width = 352; 26 | avframe->height = 288; 27 | avframe->format = AV_PIX_FMT_YUV420P; 28 | av_frame_get_buffer(avframe, 0); 29 | if(av_frame_is_writable(avframe)){ 30 | //复制数据 31 | std::cout << "复制视频数据" << std::endl; 32 | }else{ 33 | av_frame_make_writable(avframe); 34 | std::cout << "after av_frame_make_writable: 复制视频数据\n"; 35 | } 36 | av_frame_unref(avframe); 37 | } 38 | //音频编码 39 | { 40 | avframe->nb_samples = 1024; 41 | avframe->channel_layout = AV_CH_LAYOUT_MONO; 42 | avframe->format = AV_SAMPLE_FMT_S32; 43 | av_frame_get_buffer(avframe, 0); 44 | if(av_frame_is_writable(avframe)){ 45 | std::cout << "复制音频数据\n"; 46 | }else{ 47 | av_frame_make_writable(avframe); 48 | std::cout << "after av_frame_make_writable: 复制音频数据\n"; 49 | } 50 | av_frame_unref(avframe); 51 | } 52 | av_frame_free(&avframe); 53 | 54 | } -------------------------------------------------------------------------------- /example/avutil/AVLog.cpp: 
-------------------------------------------------------------------------------- 1 | //此例是为了学会av_log等函数,且理解AVClass中log_level_offset_offset参数的使用 2 | //AV_LOG_XXX中越是重要的日志,level值越小,默认level = AV_LOG_INFO 3 | //通过av_log_set_level可以改变整个运行环境的level, 当通过av_log打印日志时,仅当level的值小于等于设置的值才能正常输出 4 | //也能通过av_log_set_callback修改默认的输出回调 5 | 6 | #include "avutil_common.h" 7 | #include 8 | #include 9 | 10 | struct logTest{ 11 | AVClass* avcls; 12 | int log_level; 13 | }; 14 | 15 | 16 | const AVOption logTestOption[] = { 17 | {"level", "设置日志输出level", offsetof(logTest, log_level), AV_OPT_TYPE_INT, {.i64 = AV_LOG_DEBUG}, AV_LOG_PANIC, AV_LOG_TRACE, 0, NULL}, 18 | {NULL} 19 | }; 20 | 21 | static AVClass logClass{ 22 | .class_name = "log test", 23 | .item_name = av_default_item_name, 24 | .option = logTestOption, 25 | .version = LIBAVUTIL_VERSION_INT, 26 | .log_level_offset_offset = offsetof(logTest, log_level) //当有这个值时且av_log设置的level >= AV_LOG_FATAL 27 | //level += *(int *) (((uint8_t *) avcl) + avc->log_level_offset_offset); 28 | }; 29 | 30 | static std::mutex mtx; 31 | 32 | static void av_log_custom_callback(void *avcl, int level, const char *fmt, va_list vl){ 33 | if(level > av_log_get_level()){ 34 | return; 35 | } 36 | mtx.lock(); 37 | printf("av_log_custom_callback{level:%d, default_log_level:%d}\n", level, av_log_get_level()); 38 | mtx.unlock(); 39 | } 40 | 41 | void AVLog_Example(){ 42 | void* logObj = av_mallocz(sizeof(logTest)); 43 | *(AVClass**)logObj = &logClass; 44 | av_opt_set_defaults(logObj); 45 | 46 | int64_t level; 47 | av_opt_get_int(logObj, "level", 0, &level); 48 | std::cout << "logTest设置的默认level:" << level << std::endl; 49 | 50 | av_log(logObj, AV_LOG_FATAL, "日志输出输出level:%ld-------1\n", level); 51 | 52 | av_log_set_level(AV_LOG_TRACE); 53 | 54 | av_log(logObj, AV_LOG_FATAL, "日志输出输出level:%ld--------2\n", level); 55 | 56 | av_log_set_callback(av_log_custom_callback); 57 | 58 | av_log(logObj, AV_LOG_FATAL, "日志输出输出level:%ld--------3\n", level); 59 | } 
-------------------------------------------------------------------------------- /example/avutil/AVMath.cpp: -------------------------------------------------------------------------------- 1 | //此例为了学会两个类型的api, a × b / c 对结果不同的函数av_rescale_rnd 2 | //对两个不周pts做比较的函数 pts1 × time_base1 < pts2 x time_base2 ? -1 :(pts1 x time_base1 == pts2 x time_base2 ? 0 : 1) av_compare_ts 3 | //这两个函数在容器转换, 音频文件和视频合成时非常重要 4 | 5 | extern "C"{ 6 | #include 7 | } 8 | 9 | #include 10 | 11 | void AVMath_Example(){ 12 | //av_rescale_rnd是所有av_rescale_xxx函数的具体实体函数 13 | //av_rescale_q ->av_rescale_q_rnd - > av_rescale_rnd 14 | //av_rescale ->av_rescale_rnd 15 | 16 | std::cout << "av_rescale_q_rnd(1, AVRational{1, 25}, AVRational{1, 90000}, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX):" 17 | << av_rescale_q_rnd(1, AVRational{1, 25}, AVRational{1, 90000}, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)) << std::endl; 18 | 19 | std::cout <<"av_compare_ts(1, AVRational{1, 25}, 50, AVRational{1, 48000}):" 20 | << av_compare_ts(1, AVRational{1, 25}, 50, AVRational{1, 48000}) << std::endl; 21 | } -------------------------------------------------------------------------------- /example/avutil/AVOption.cpp: -------------------------------------------------------------------------------- 1 | //本例子主要测试三类函数 av_opt_set、av_opt_get、av_opt_set_default 2 | //带有AVClass的子成员赋值、获取值,获取 AV_OPT_SEARCH_CHILDREN 标识 3 | 4 | #include "avutil_common.h" 5 | #include 6 | 7 | const AVOption stuInfoAVOption[] = { 8 | {.name = "stuName", .help = "姓名", .offset = offsetof(struct StuInfo, _stuName), .type = AV_OPT_TYPE_STRING, {.str = "张xx"}, .min = 0.0, .max = 0.0, .flags = 0, .unit = NULL}, 9 | {"stuNumber", "学号", offsetof(struct StuInfo, _stuNo), AV_OPT_TYPE_UINT64, {.i64 = 100000}, 0.0, 999999999.0, 0, NULL}, 10 | {"classInfo", "班级", offsetof(struct StuInfo, _classInfo), AV_OPT_TYPE_STRING, {.str = "张江xx学校"}, 0.0, 0.0, 0}, 11 | {NULL} 12 | }; 13 | 14 | static AVClass stuInfo_avcls = { 15 | .class_name = "AVOption stuInfo 
example", 16 | .item_name = av_default_item_name, //这个函数实质上就是获取上边的class_name 17 | .option = stuInfoAVOption, //默认参数 18 | .version = LIBAVUTIL_VERSION_INT 19 | }; 20 | 21 | 22 | const AVOption scoreInfoAVOption[] = { 23 | {"chinese", "语文成绩", offsetof(struct ScoreInfo, _chinese), AV_OPT_TYPE_FLOAT, {.dbl = 60.0}, 0.0, 150.0, 0, NULL}, 24 | {"math", "数学成绩", offsetof(struct ScoreInfo, _math), AV_OPT_TYPE_FLOAT, {.dbl = 60.0}, 0.0, 150.0, 0}, 25 | {"english", "英语成绩", offsetof(struct ScoreInfo, _english), AV_OPT_TYPE_FLOAT, {.dbl = 60.0}, 0.0, 150.0, 0}, 26 | {NULL} //此处必须以{NULL}结束,否则会出现赋值错误问题 27 | }; 28 | 29 | 30 | static void* score_info_next(void *obj, void *prev){ 31 | ScoreInfo* scoreInfo = (ScoreInfo*)obj; 32 | if(!prev && scoreInfo->opaque){ 33 | return scoreInfo->opaque; 34 | } 35 | return NULL; 36 | } 37 | 38 | static const AVClass* socre_info_child_class_iterate(void **iter){ 39 | const AVClass *c = *iter ? NULL : &stuInfo_avcls; 40 | *iter = (void*)(uintptr_t)c; 41 | return c; 42 | } 43 | 44 | //C++初始化AVClass成员变量不能乱序 45 | static AVClass scoreInfo_avcls = { 46 | .class_name = "AVOption scoreInfo example", 47 | .item_name = av_default_item_name, //这个函数实质上就是获取上边的class_name 48 | .option = scoreInfoAVOption, //默认参数 49 | .version = LIBAVUTIL_VERSION_INT, 50 | .log_level_offset_offset = 0, 51 | .parent_log_context_offset = 0, 52 | .child_next = score_info_next, 53 | .category = AV_CLASS_CATEGORY_NA, 54 | .get_category = NULL, 55 | .query_ranges = NULL, 56 | .child_class_iterate = socre_info_child_class_iterate 57 | }; 58 | 59 | //通过av_opt_get获取对象值 60 | void printfScoreInfo(void* scoreInfo){ 61 | uint8_t* out = NULL; 62 | if(av_opt_get(scoreInfo, "stuName", AV_OPT_SEARCH_CHILDREN, &out)>=0){ 63 | std::cout << "姓名:" << out << std::endl; 64 | av_freep(&out); 65 | } 66 | if(av_opt_get(scoreInfo, "stuNumber", AV_OPT_SEARCH_CHILDREN, &out) >= 0){ 67 | std::cout << "学号:" << strtol((char*)out, NULL, 10) << std::endl; 68 | av_freep(&out); 69 | } 70 | if(av_opt_get(scoreInfo, 
"classInfo", AV_OPT_SEARCH_CHILDREN, &out)>=0){ 71 | std::cout << "班级:" << out << std::endl; 72 | av_freep(&out); 73 | } 74 | double marks; 75 | if(av_opt_get_double(scoreInfo, "chinese", 0, &marks) >= 0){ 76 | std::cout << "语文成绩:" << marks << std::endl; 77 | } 78 | if(av_opt_get_double(scoreInfo, "math", 0, &marks) >= 0){ 79 | std::cout << "数学成绩:" << marks << std::endl; 80 | } 81 | 82 | if(av_opt_get_double(scoreInfo, "english", 0, &marks) >= 0){ 83 | std::cout << "英语成绩:" << marks << std::endl; 84 | } 85 | } 86 | 87 | //通过av_opt_set_defaults设置默认参数 88 | static void* set_default_info(){ 89 | struct ScoreInfo* scoreInfo = (struct ScoreInfo*)av_mallocz(sizeof(struct ScoreInfo)); 90 | scoreInfo->_cls = &scoreInfo_avcls; 91 | struct StuInfo* stuInfo = (struct StuInfo*)av_mallocz(sizeof(struct StuInfo)); 92 | scoreInfo->opaque = stuInfo; 93 | stuInfo->_cls = &stuInfo_avcls; 94 | av_opt_set_defaults(scoreInfo->opaque); 95 | av_opt_set_defaults(scoreInfo); 96 | return scoreInfo; 97 | } 98 | 99 | //通过av_opt_frees释放对象 100 | void freeScoreInfo(void** scoreInfo){ 101 | av_opt_free(*scoreInfo); //释放ScoreInfo内部分配的内存,如此对象的_strName 102 | 103 | void* opaque = ((ScoreInfo*)(*scoreInfo))->opaque; 104 | if( opaque != NULL){ 105 | av_opt_free(opaque); 106 | } 107 | av_freep(scoreInfo); 108 | } 109 | //通过av_opt_set族函数设值 110 | static void set_name(void* ScoreInfo, const char* name){ 111 | av_opt_set(ScoreInfo, "stuName", name, AV_OPT_SEARCH_CHILDREN); 112 | } 113 | //设置学号 114 | static void set_stuNo(void* ScoreInfo, uint64_t no){ 115 | av_opt_set_int(ScoreInfo, "stuNumber", no, AV_OPT_SEARCH_CHILDREN); 116 | } 117 | 118 | //设置班级 119 | static void set_stuClass(void* ScoreInfo, const char* classInfo){ 120 | av_opt_set(ScoreInfo, "classInfo", classInfo, AV_OPT_SEARCH_CHILDREN); 121 | } 122 | 123 | //通过av_opt_set族函数设值 124 | static void set_chinese_marks(void* ScoreInfo, float marks){ 125 | av_opt_set_double(ScoreInfo, "chinese", marks, 0); 126 | } 127 | //通过av_opt_set族函数设值 128 | static void 
set_math_marks(void* ScoreInfo, float marks){ 129 | av_opt_set_double(ScoreInfo, "math", marks, 0); 130 | } 131 | //通过av_opt_set族函数设值 132 | static void set_english_marks(void* ScoreInfo, float marks){ 133 | av_opt_set_double(ScoreInfo, "english", marks, 0); 134 | } 135 | 136 | 137 | void AVOption_Example(){ 138 | void* obj = set_default_info(); 139 | printfScoreInfo(obj); 140 | set_name(obj, "蔡菜"); 141 | set_stuNo(obj, 13818888); 142 | set_stuClass(obj, "张江大学"); 143 | set_chinese_marks(obj, 120.5); 144 | set_math_marks(obj, 125.5); 145 | set_english_marks(obj, 140.5); 146 | printfScoreInfo(obj); 147 | freeScoreInfo(&obj); 148 | } 149 | 150 | -------------------------------------------------------------------------------- /example/avutil/AVRational.cpp: -------------------------------------------------------------------------------- 1 | //此例主要学会创建AVRational、转成double、倒数,double转AVRational等 2 | //该对象在从文件-》解码-》显示/ 编码-》写入文件等场景有非常多的运用 3 | //av_make_q 、av_q2d、 av_d2q、 av_inv_q等 4 | 5 | extern "C"{ 6 | #include 7 | } 8 | #include 9 | 10 | void AVRational_Example(){ 11 | 12 | AVRational o = av_make_q(1, 25); 13 | 14 | std::cout << "av_q2d(av_make_q(1, 25)) = " << av_q2d(o) << std::endl; 15 | 16 | AVRational o2 = av_inv_q(o); 17 | 18 | std::cout << "av_inv_q(av_make_q(1, 25)): num = " << o2.num << ", denominator = " << o2.den << std::endl; 19 | 20 | double d = 1.25; 21 | 22 | AVRational o3 = av_d2q(d, 100); 23 | 24 | std::cout << "av_d2q(1.25, 100) : num = " << o3.num << ", denominator = " << o3.den << std::endl; 25 | } -------------------------------------------------------------------------------- /example/avutil/AVSample.cpp: -------------------------------------------------------------------------------- 1 | //通过此例我们主要学会音频av_sample_xxx相关api 2 | //判断当前avsample是否是planar(线性) av_sample_fmt_is_planar 3 | //最重要的两个api: 获取单个sample的大小av_get_bytes_per_sample 和 获取多个sample需要buffer size av_samples_get_buffer_size 4 | //填充音频buffer缓冲的函数av_samples_fill_arrays,这个函数内存只是内存指针的地址赋值,并没有分配内存 5 
| //填充静音字符av_samples_set_silence 6 | 7 | extern "C"{ 8 | #include 9 | } 10 | #include 11 | 12 | void AVSample_Example(){ 13 | std::cout << "AV_SAMPLE_FMT_DBLP is planar:" << av_sample_fmt_is_planar(AV_SAMPLE_FMT_DBLP) << std::endl; 14 | 15 | int samples = 1024; 16 | int channel = 2; 17 | int perSampleSize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16P); 18 | std::cout << "AV_SAMPLE_FMT_S16P single sample size is:" << perSampleSize << std::endl; 19 | 20 | int linesize; 21 | int bufferSize = av_samples_get_buffer_size(&linesize, channel, samples, AV_SAMPLE_FMT_S16P, 0); 22 | 23 | std::cout << "av_samples_get_buffer_size : linesize = " << linesize << ", buffer size = " << bufferSize << std::endl; 24 | 25 | uint8_t* pAudio[2]; 26 | uint8_t* audioData = (uint8_t*)av_malloc(bufferSize); 27 | av_samples_fill_arrays(pAudio, &linesize, audioData, channel, samples, AV_SAMPLE_FMT_S16P, 0); 28 | 29 | av_samples_set_silence(pAudio, 0, samples, channel, AV_SAMPLE_FMT_S16P); 30 | 31 | av_freep(&audioData); 32 | } -------------------------------------------------------------------------------- /example/avutil/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #source 2 | AUX_SOURCE_DIRECTORY(. 
DIR_LIB_SRCS) 3 | #生成静态库 4 | ADD_LIBRARY(avutil_example ${DIR_LIB_SRCS}) 5 | -------------------------------------------------------------------------------- /example/avutil/avutil_common.h: -------------------------------------------------------------------------------- 1 | #ifndef AVUTIL_COMMON_H_H_ 2 | #define AVUTIL_COMMON_H_H_ 3 | 4 | extern "C"{ 5 | #include 6 | } 7 | 8 | struct StuInfo{ 9 | AVClass* _cls; 10 | char *_stuName; //姓名 11 | uint64_t _stuNo; //学号 12 | char* _classInfo; //班级 13 | }; 14 | 15 | 16 | struct ScoreInfo{ 17 | AVClass* _cls; 18 | void* opaque; //私有信息,指向StuInfo 19 | float _chinese; //语文 20 | float _math; //数学 21 | float _english;//英语 22 | }; 23 | 24 | void printfScoreInfo(void* scoreInfo); 25 | 26 | void freeScoreInfo(void** scoreInfo); 27 | 28 | #endif -------------------------------------------------------------------------------- /example/example.cpp: -------------------------------------------------------------------------------- 1 | extern void AVOption_Example(); 2 | extern void AVDictionary_Example(); 3 | extern void AVLog_Example(); 4 | extern void AVRational_Example(); 5 | extern void AVBuffer_Example(); 6 | extern void AVSample_Example(); 7 | extern void AVFifoBuffer_Example(); 8 | extern void AVAudioFifo_Example(); 9 | extern void AVFrame_Example(); 10 | extern void AVMath_Example(); 11 | ////////////////////////////////////////////// 12 | extern void AVFormatInput_Example(); 13 | extern void AVStream_Example(); 14 | int main(){ 15 | AVOption_Example(); 16 | AVDictionary_Example(); 17 | AVLog_Example(); 18 | AVRational_Example(); 19 | AVBuffer_Example(); 20 | AVSample_Example(); 21 | AVFifoBuffer_Example(); 22 | AVAudioFifo_Example(); 23 | AVFrame_Example(); 24 | AVMath_Example(); 25 | 26 | AVFormatInput_Example(); 27 | AVStream_Example(); 28 | return 0; 29 | } -------------------------------------------------------------------------------- /installFFmpeg.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | ROOT=`pwd` 6 | INSTALL_DIR=$ROOT/libdeps_install 7 | LIBDEPS_SOURCE=$ROOT/libdeps 8 | if [ ! -d $LIBDEPS_SOURCE ]; then 9 | mkdir ${LIBDEPS_SOURCE} 10 | fi 11 | 12 | #install hg: sudo apt-get install mercurial 13 | #install numa: apt-get install -y libnuma-dev 14 | 15 | install_lame(){ 16 | echo "---------begin install lamemp3--------------" 17 | cd ${LIBDEPS_SOURCE} 18 | dir=lame-3.100 19 | url=https://nchc.dl.sourceforge.net/project/lame/lame/3.100/lame-3.100.tar.gz 20 | if [ ! -d $dir ]; then 21 | curl -O ${url} 22 | tar -xzvf lame-3.100.tar.gz 23 | fi 24 | cd ${dir} 25 | ./configure --prefix=${INSTALL_DIR} 26 | make -j4 27 | make install 28 | } 29 | 30 | 31 | install_x264(){ 32 | echo "-------------begin install x264---------------" 33 | cd ${LIBDEPS_SOURCE} 34 | dir=x264 35 | url=https://code.videolan.org/videolan/x264.git 36 | if [ ! -d $dir ]; then 37 | git clone ${url} ${dir} 38 | fi 39 | cd ${dir} 40 | ./configure --prefix=${INSTALL_DIR} --disable-asm --enable-shared --bit-depth=all --enable-pic 41 | make -j4 42 | make install 43 | } 44 | 45 | install_x265(){ 46 | echo "-------------begin install x265---------------" 47 | cd ${LIBDEPS_SOURCE} 48 | dir=x265 49 | url=http://hg.videolan.org/x265 50 | if [ ! 
-d $dir ]; then 51 | hg clone ${url} ${dir} 52 | fi 53 | cd ${dir}/build/linux 54 | #参考 x265/build/linux/multilib.sh 55 | mkdir -p 8bit 10bit 12bit 56 | cd 12bit 57 | cmake ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF -DMAIN12=ON 58 | make ${MAKEFLAGS} 59 | 60 | cd ../10bit 61 | cmake ../../../source -DHIGH_BIT_DEPTH=ON -DEXPORT_C_API=OFF -DENABLE_SHARED=OFF -DENABLE_CLI=OFF 62 | make ${MAKEFLAGS} 63 | 64 | cd ../8bit 65 | ln -sf ../10bit/libx265.a libx265_main10.a 66 | ln -sf ../12bit/libx265.a libx265_main12.a 67 | cmake ../../../source -DEXTRA_LIB="x265_main10.a;x265_main12.a" -DEXTRA_LINK_FLAGS=-L. -DLINKED_10BIT=ON -DLINKED_12BIT=ON -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} 68 | make ${MAKEFLAGS} 69 | 70 | # rename the 8bit library, then combine all three into libx265.a 71 | 72 | mv libx265.a libx265_main.a 73 | uname=`uname` 74 | 75 | if [ "$uname" = "Linux" ]; then 76 | # On Linux, we use GNU ar to combine the static libraries together 77 | ar -M </dev/null 90 | fi 91 | make install 92 | } 93 | 94 | install_FFmpeg(){ 95 | echo "-------------begin install ffmpeg---------------" 96 | cd ${LIBDEPS_SOURCE} 97 | dir=ffmepg 98 | url=https://git.ffmpeg.org/ffmpeg.git 99 | if [ ! 
-d $dir ]; then 100 | git clone ${url} ${dir} 101 | fi 102 | cd ${dir} 103 | echo "install ${INSTALL_DIR}" 104 | export PKG_CONFIG_PATH=${INSTALL_DIR}/lib/pkgconfig:$PKG_CONFIG_PATH 105 | ./configure --prefix=${INSTALL_DIR} \ 106 | --enable-gpl \ 107 | --enable-version3 \ 108 | --enable-nonfree \ 109 | --enable-libx264 \ 110 | --enable-libx265 \ 111 | --enable-libmp3lame \ 112 | --enable-pic \ 113 | --disable-asm \ 114 | --extra-cflags=-I${INSTALL_DIR}/include \ 115 | --extra-ldflags=-L${INSTALL_DIR}/lib \ 116 | --extra-libs='-lstdc++ -lm -lrt -ldl -lpthread' 117 | make -j4 118 | make install 119 | } 120 | 121 | install_lame 122 | install_x264 123 | install_x265 124 | install_FFmpeg 125 | -------------------------------------------------------------------------------- /src/audio_convert_tool.h: -------------------------------------------------------------------------------- 1 | #ifndef AUDIO_CONVERT_TOOL_H_H_ 2 | #define AUDIO_CONVERT_TOOL_H_H_ 3 | 4 | #include "global.h" 5 | /** 6 | * 这个类实现了音频PCM数据从一种格式到另一种格式的转换,包括(采样率、声道数、单个采样的存储格式AVSampleFormat) 7 | */ 8 | class SwrCtxManager{ 9 | public: 10 | /** 11 | * src_channel_layout: 输入音频数据的声道数,如单声道、双声道等 12 | * src_sample_rate:输入音频数据的采样率,如44100Hz,16000Hz 13 | * src_sample_format:输入音频的存储格式,如AV_SAMPLE_FMT_S16 14 | * dest_channel_layout: 输出音频数据的声道数,如单声道、双声道等 15 | * dest_sample_rate:输出音频数据的采样率,如44100Hz,16000Hz 16 | * dest_sample_format:输出音频的存储格式,如AV_SAMPLE_FMT_FLTP 17 | */ 18 | SwrCtxManager(int64_t src_channel_layout, int32_t src_sample_rate, AVSampleFormat src_sample_format, 19 | int64_t dest_channel_layout, int32_t dest_sample_rate, AVSampleFormat dest_sample_format) 20 | :swr_ctx(nullptr), 21 | m_src_channel_layout(src_channel_layout), 22 | m_src_sample_rate(src_sample_rate), 23 | m_src_sample_format(src_sample_format), 24 | m_dest_channel_layout(dest_channel_layout), 25 | m_dest_sample_rate(dest_sample_rate), 26 | m_dest_sample_format(dest_sample_format), 27 | m_max_dst_nb_samples(0), 28 | m_dst_nb_channels(0), 29 | 
m_dest_data(nullptr){ 30 | } 31 | /** 32 | * 初始化 33 | */ 34 | bool Init(){ 35 | swr_ctx = swr_alloc(); 36 | if(swr_ctx == nullptr){ 37 | return false; 38 | } 39 | av_opt_set_int(swr_ctx, "in_channel_layout", m_src_channel_layout, 0); 40 | av_opt_set_int(swr_ctx, "in_sample_rate", m_src_sample_rate, 0); 41 | av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", m_src_sample_format, 0); 42 | 43 | av_opt_set_int(swr_ctx, "out_channel_layout", m_dest_channel_layout, 0); 44 | av_opt_set_int(swr_ctx, "out_sample_rate", m_dest_sample_rate, 0); 45 | av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", m_dest_sample_format, 0); 46 | 47 | /* initialize the resampling context */ 48 | if (swr_init(swr_ctx) < 0) { 49 | fprintf(stderr, "Failed to initialize the resampling context\n"); 50 | return false; 51 | } 52 | m_dst_nb_channels = av_get_channel_layout_nb_channels(m_dest_channel_layout); 53 | return true; 54 | } 55 | /** 56 | * 转换格式实现 57 | * src: 待转换的音频采样,格式与与初始化时传入的参数保持一致 58 | * int: 样本数 59 | * return: <0 error, >=0成功 60 | */ 61 | int Convert(const uint8_t** src, int in_count){ 62 | //计算可能输出的样本数 63 | int dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, m_src_sample_rate) + 64 | in_count, m_dest_sample_rate, m_src_sample_rate, AV_ROUND_UP); 65 | if(m_max_dst_nb_samples == 0){//首次分配内存 66 | int dst_linesize; 67 | av_samples_alloc_array_and_samples(&m_dest_data, &dst_linesize, m_dst_nb_channels, dst_nb_samples, m_dest_sample_format, 0); 68 | m_max_dst_nb_samples = dst_nb_samples; 69 | }else if(m_max_dst_nb_samples < dst_nb_samples){//当内存不够重新分配内存 70 | av_freep(&m_dest_data[0]); 71 | int dst_linesize; 72 | if(av_samples_alloc(m_dest_data, &dst_linesize, m_dst_nb_channels, dst_nb_samples, m_dest_sample_format, 0) < 0){ 73 | av_freep(&m_dest_data[0]); 74 | free(m_dest_data); 75 | m_dest_data = nullptr; 76 | return -1; 77 | } 78 | m_max_dst_nb_samples = dst_nb_samples; 79 | } 80 | //调用转换函数实现转换 81 | return swr_convert(swr_ctx, m_dest_data, dst_nb_samples, src, in_count); 82 | } 83 | /** 
84 | * 当转换成功后,通过这个接口获取转换后的数据 85 | */ 86 | const uint8_t ** GetConvertedBuffer() const{ 87 | return (const uint8_t **)m_dest_data; 88 | } 89 | ~SwrCtxManager(){ 90 | if(swr_ctx){ 91 | swr_free(&swr_ctx); 92 | } 93 | if(m_dest_data){ 94 | av_freep(&m_dest_data[0]); 95 | } 96 | av_free(m_dest_data); 97 | } 98 | private: 99 | SwrContext *swr_ctx; 100 | int64_t m_src_channel_layout; 101 | int64_t m_dest_channel_layout; 102 | int32_t m_src_sample_rate; 103 | int32_t m_dest_sample_rate; 104 | AVSampleFormat m_src_sample_format; 105 | AVSampleFormat m_dest_sample_format; 106 | int m_max_dst_nb_samples; 107 | int m_dst_nb_channels; 108 | uint8_t **m_dest_data; 109 | }; 110 | 111 | #endif -------------------------------------------------------------------------------- /src/audio_filter_aformat_output_pcm.h: -------------------------------------------------------------------------------- 1 | #ifndef AUDIO_FILTER_AFORMAT_OUTPUT_PCM_H_H_ 2 | #define AUDIO_FILTER_AFORMAT_OUTPUT_PCM_H_H_ 3 | 4 | #include "global.h" 5 | #include "codecimpl.h" 6 | #include "avframe_util.h" 7 | 8 | void audio_filter_aformat_test(){ 9 | const char* input_file = "./V90405-190106.mp4"; 10 | AVFormatContext * inputFormatContext = NULL; 11 | if(avformat_open_input(&inputFormatContext, input_file, NULL, NULL) < 0){ 12 | return; 13 | } 14 | int audio_index = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); 15 | if(audio_index < 0){ 16 | return; 17 | } 18 | AVStream* stream = inputFormatContext->streams[audio_index]; 19 | const AVCodec *avcodec = avcodec_find_decoder(stream->codecpar->codec_id); 20 | AVCodecContext* avcodec_ctx = avcodec_alloc_context3(avcodec); 21 | avcodec_parameters_to_context(avcodec_ctx, stream->codecpar); 22 | if(avcodec_open2(avcodec_ctx, avcodec, NULL) < 0){ 23 | return; 24 | } 25 | 26 | AVFilterGraph* filtergraph = avfilter_graph_alloc(); 27 | 28 | /*const AVFilter* srcFilter = avfilter_get_by_name("abuffer"); 29 | AVFilterContext* srcFilterCtx = NULL; 
30 | char in_args[512]; 31 | snprintf(in_args, sizeof(in_args), 32 | "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRId64, 33 | avcodec_ctx->time_base.num, avcodec_ctx->time_base.den, avcodec_ctx->sample_rate, 34 | av_get_sample_fmt_name(avcodec_ctx->sample_fmt), 35 | avcodec_ctx->channel_layout); 36 | printf("音频信息:%s\n", in_args); 37 | avfilter_graph_create_filter(&srcFilterCtx, srcFilter, "src", in_args, NULL, filtergraph); 38 | 39 | const AVFilter* sinkFilter = avfilter_get_by_name("abuffersink"); 40 | AVFilterContext* sinkFilterCtx = NULL; 41 | static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }; 42 | static const int64_t out_channel_layouts[] = {AV_CH_LAYOUT_MONO, -1 }; 43 | static const int out_sample_rates[] = {32000, -1 }; 44 | int ret = avfilter_graph_create_filter(&sinkFilterCtx, sinkFilter, "out", NULL, NULL, filtergraph); 45 | if (ret < 0) { 46 | av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); 47 | 48 | } 49 | ret = av_opt_set_int_list(sinkFilterCtx, "sample_fmts", out_sample_fmts, -1, 50 | AV_OPT_SEARCH_CHILDREN); 51 | if (ret < 0) { 52 | av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n"); 53 | 54 | } 55 | ret = av_opt_set_int_list(sinkFilterCtx, "channel_layouts", out_channel_layouts, -1, 56 | AV_OPT_SEARCH_CHILDREN); 57 | if (ret < 0) { 58 | av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); 59 | } 60 | ret = av_opt_set_int_list(sinkFilterCtx, "sample_rates", out_sample_rates, -1, 61 | AV_OPT_SEARCH_CHILDREN); 62 | if (ret < 0) { 63 | av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n"); 64 | }*/ 65 | AVFilterContext* srcFilterCtx = NULL; 66 | int ret = InitABufferFilter(filtergraph, &srcFilterCtx, "src", avcodec_ctx->time_base, avcodec_ctx->sample_rate, 67 | avcodec_ctx->sample_fmt, avcodec_ctx->channel_layout); 68 | 69 | static const AVSampleFormat pre_mix_sample_fmt = AV_SAMPLE_FMT_S16; 70 | static const int64_t 
pre_mix_channel_layout = AV_CH_LAYOUT_MONO; 71 | static const int pre_sample_rate = 32000; 72 | 73 | AVFilterContext* sinkFilterCtx = NULL; 74 | ret = InitABufferSinkFilter(filtergraph, &sinkFilterCtx, "out", pre_mix_sample_fmt, pre_sample_rate, pre_mix_channel_layout); 75 | 76 | 77 | // const AVFilter* aresample = avfilter_get_by_name("aresample"); 78 | // snprintf(in_args, sizeof(in_args),"%d", pre_sample_rate); 79 | // AVFilterContext* aresampleFilterCtx = NULL; 80 | // ret = avfilter_graph_create_filter(&aresampleFilterCtx, aresample, "aformat", in_args, NULL, filtergraph); 81 | 82 | 83 | 84 | const AVFilter* aformat = avfilter_get_by_name("aformat"); 85 | char in_args[512]; 86 | snprintf(in_args, sizeof(in_args), 87 | "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRId64, 88 | av_get_sample_fmt_name(pre_mix_sample_fmt), pre_sample_rate, pre_mix_channel_layout); 89 | AVFilterContext* aformatFilterCtx = NULL; 90 | ret = avfilter_graph_create_filter(&aformatFilterCtx, aformat, "aformat", in_args, NULL, filtergraph); 91 | 92 | // ret = avfilter_link(srcFilterCtx, 0, aresampleFilterCtx, 0); 93 | // ret = avfilter_link(aresampleFilterCtx, 0, aformatFilterCtx, 0); 94 | ret = avfilter_link(srcFilterCtx, 0, aformatFilterCtx, 0); 95 | ret = avfilter_link(aformatFilterCtx, 0, sinkFilterCtx, 0); 96 | // ret = avfilter_link(aresampleFilterCtx, 0, sinkFilterCtx, 0); 97 | 98 | ret = avfilter_graph_config(filtergraph, NULL); 99 | if(ret < 0) 100 | return; 101 | 102 | 103 | FILE* pcmFile = fopen("./aformat.pcm", "wb"); 104 | 105 | AVPacket avpkt; 106 | av_init_packet(&avpkt); 107 | AVFrame *filt_frame = av_frame_alloc(); 108 | bool exit = false; 109 | while(!exit){ 110 | ret = av_read_frame(inputFormatContext, &avpkt); 111 | if(ret == 0){ 112 | if(avpkt.stream_index == audio_index){ 113 | ret = decode(avcodec_ctx, &avpkt, [&](AVCodecContext* ctx, const AVFrame* avframe){ 114 | AVFrame* inframe = av_frame_clone(avframe); 115 | if (av_buffersrc_add_frame_flags(srcFilterCtx, 
inframe, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { 116 | av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); 117 | } 118 | av_frame_unref(inframe); 119 | }); 120 | if(ret < 0){ 121 | printf("11111 decode avpkt1 failed\n"); 122 | } 123 | } 124 | av_packet_unref(&avpkt); 125 | 126 | }else{ 127 | ret = decode(avcodec_ctx, nullptr, [&](AVCodecContext* ctx, const AVFrame* avframe){ 128 | AVFrame* inframe = av_frame_clone(avframe); 129 | if (av_buffersrc_add_frame_flags(srcFilterCtx, inframe, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { 130 | av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); 131 | } 132 | av_frame_unref(inframe); 133 | }); 134 | exit = true;; 135 | } 136 | 137 | while (1) {//从buffersink设备上下文获取视频帧 138 | ret = av_buffersink_get_frame(sinkFilterCtx, filt_frame); 139 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){ 140 | break; 141 | } 142 | if (ret < 0){ 143 | exit = true; 144 | break; 145 | } 146 | WritePCMToFile(filt_frame, pcmFile);//将处理后的AVFrame写入到文件 147 | av_frame_unref(filt_frame); 148 | } 149 | } 150 | 151 | avformat_close_input(&inputFormatContext); 152 | avcodec_close(avcodec_ctx); 153 | av_frame_free(&filt_frame); 154 | avfilter_graph_free(&filtergraph); 155 | } 156 | 157 | #endif -------------------------------------------------------------------------------- /src/audio_filter_aresample_output_pcm.h: -------------------------------------------------------------------------------- 1 | #ifndef AUDIO_FILTER_ARESAMPLE_OUTPUT_PCM_H_H_ 2 | #define AUDIO_FILTER_ARESAMPLE_OUTPUT_PCM_H_H_ 3 | 4 | #include "global.h" 5 | #include "codecimpl.h" 6 | #include "avframe_util.h" 7 | 8 | void audio_filter_aresample_test(){ 9 | const char* input_file = "./V90405-190106.mp4"; 10 | AVFormatContext * inputFormatContext = NULL; 11 | if(avformat_open_input(&inputFormatContext, input_file, NULL, NULL) < 0){ 12 | return; 13 | } 14 | int audio_index = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); 15 | if(audio_index < 
0){ 16 | return; 17 | } 18 | AVStream* stream = inputFormatContext->streams[audio_index]; 19 | const AVCodec *avcodec = avcodec_find_decoder(stream->codecpar->codec_id); 20 | AVCodecContext* avcodec_ctx = avcodec_alloc_context3(avcodec); 21 | avcodec_parameters_to_context(avcodec_ctx, stream->codecpar); 22 | if(avcodec_open2(avcodec_ctx, avcodec, NULL) < 0){ 23 | return; 24 | } 25 | 26 | AVFilterGraph* filtergraph = avfilter_graph_alloc(); 27 | 28 | 29 | AVFilterContext* srcFilterCtx = NULL; 30 | int ret = InitABufferFilter(filtergraph, &srcFilterCtx, "src", avcodec_ctx->time_base, avcodec_ctx->sample_rate, 31 | avcodec_ctx->sample_fmt, avcodec_ctx->channel_layout); 32 | 33 | static const AVSampleFormat pre_mix_sample_fmt = AV_SAMPLE_FMT_S16; 34 | static const int64_t pre_mix_channel_layout = AV_CH_LAYOUT_STEREO; 35 | static const int pre_sample_rate = 32000; 36 | 37 | AVFilterContext* sinkFilterCtx = NULL; 38 | ret = InitABufferSinkFilter(filtergraph, &sinkFilterCtx, "out", pre_mix_sample_fmt, pre_sample_rate, pre_mix_channel_layout); 39 | 40 | // char in_args[512]; 41 | // const AVFilter* aresample = avfilter_get_by_name("aresample"); 42 | // snprintf(in_args, sizeof(in_args),"%d", pre_sample_rate); 43 | // AVFilterContext* aresampleFilterCtx = NULL; 44 | // ret = avfilter_graph_create_filter(&aresampleFilterCtx, aresample, "aformat", in_args, NULL, filtergraph); 45 | 46 | // ret = avfilter_link(srcFilterCtx, 0, aresampleFilterCtx, 0); 47 | // ret = avfilter_link(aresampleFilterCtx, 0, sinkFilterCtx, 0); 48 | 49 | ret = avfilter_link(srcFilterCtx, 0, sinkFilterCtx, 0); 50 | 51 | ret = avfilter_graph_config(filtergraph, NULL); 52 | if(ret < 0) 53 | return; 54 | 55 | 56 | FILE* pcmFile = fopen("./aresample.pcm", "wb"); 57 | 58 | AVPacket avpkt; 59 | av_init_packet(&avpkt); 60 | AVFrame *filt_frame = av_frame_alloc(); 61 | bool exit = false; 62 | while(!exit){ 63 | ret = av_read_frame(inputFormatContext, &avpkt); 64 | if(ret == 0){ 65 | if(avpkt.stream_index == 
audio_index){ 66 | ret = decode(avcodec_ctx, &avpkt, [&](AVCodecContext* ctx, const AVFrame* avframe){ 67 | AVFrame* inframe = av_frame_clone(avframe); 68 | if (av_buffersrc_add_frame_flags(srcFilterCtx, inframe, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { 69 | av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); 70 | } 71 | av_frame_unref(inframe); 72 | }); 73 | if(ret < 0){ 74 | printf("11111 decode avpkt1 failed\n"); 75 | } 76 | } 77 | av_packet_unref(&avpkt); 78 | 79 | }else{ 80 | ret = decode(avcodec_ctx, nullptr, [&](AVCodecContext* ctx, const AVFrame* avframe){ 81 | AVFrame* inframe = av_frame_clone(avframe); 82 | if (av_buffersrc_add_frame_flags(srcFilterCtx, inframe, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { 83 | av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); 84 | } 85 | av_frame_unref(inframe); 86 | }); 87 | exit = true;; 88 | } 89 | 90 | while (1) {//从buffersink设备上下文获取视频帧 91 | ret = av_buffersink_get_frame(sinkFilterCtx, filt_frame); 92 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){ 93 | break; 94 | } 95 | if (ret < 0){ 96 | exit = true; 97 | break; 98 | } 99 | WritePCMToFile(filt_frame, pcmFile);//将处理后的AVFrame写入到文件 100 | av_frame_unref(filt_frame); 101 | } 102 | } 103 | 104 | avformat_close_input(&inputFormatContext); 105 | avcodec_close(avcodec_ctx); 106 | av_frame_free(&filt_frame); 107 | avfilter_graph_free(&filtergraph); 108 | } 109 | 110 | #endif -------------------------------------------------------------------------------- /src/av_util_avclass_avoption_test.h: -------------------------------------------------------------------------------- 1 | #ifndef AV_UTIL_AVCLASS_AVOPTION_TEST_H_H_ 2 | #define AV_UTIL_AVCLASS_AVOPTION_TEST_H_H_ 3 | 4 | #include "global.h" 5 | 6 | typedef struct AVCLSTEST{ 7 | const AVClass* avcls; 8 | AVDictionary *userinfo; 9 | AVDictionary *score; 10 | int kkk; 11 | //userinfo opts 12 | char* name; 13 | char* gender; 14 | char* school; 15 | //score opts 16 | float math; 17 | float lenglish; 18 | 
}AVCLSTEST; 19 | 20 | #define OPTION_OFFSET(x) offsetof(AVCLSTEST, x) 21 | #define VE 0 22 | static const AVOption av_cls_test_options[] = { 23 | {"student_name", "学生姓名", OPTION_OFFSET(name), AV_OPT_TYPE_STRING, {.str = "张xxx"}, 0, 0, VE, nullptr}, 24 | {"student_gender", "学生性别", OPTION_OFFSET(gender), AV_OPT_TYPE_STRING, {.str = "人妖"}, 0, 0, VE, nullptr}, 25 | {"student_school", "所在学校", OPTION_OFFSET(school), AV_OPT_TYPE_STRING, {.str = "家里蹲大学"}, 0, 0, VE, nullptr}, 26 | {"math", "数学成绩", OPTION_OFFSET(math), AV_OPT_TYPE_FLOAT, {.dbl = 59.99}, 0, 100, VE, nullptr}, 27 | {"lenglish", "英语成绩", OPTION_OFFSET(lenglish), AV_OPT_TYPE_FLOAT, {.dbl = 59.99}, 0, 100, VE, nullptr}, 28 | { NULL } 29 | }; 30 | 31 | const AVClass g_avcls{ 32 | .class_name = "AVCLSTEST", 33 | .item_name = av_default_item_name, 34 | .option = av_cls_test_options, 35 | .version = LIBAVUTIL_VERSION_INT 36 | }; 37 | 38 | class IPlayControl{ 39 | public: 40 | virtual void play() = 0; 41 | virtual void pause() = 0; 42 | virtual void stop() = 0; 43 | }; 44 | 45 | class IPlayControlView{ 46 | public: 47 | virtual void setPlayControl(IPlayControl* interface) = 0; 48 | }; 49 | 50 | class PlayControlView: public IPlayControlView{ 51 | public: 52 | void setPlayControl(IPlayControl* interface) override{ 53 | _interface = interface; 54 | } 55 | void start(){ 56 | if(_interface != nullptr){ 57 | _interface->play(); 58 | } 59 | } 60 | void pause(){ 61 | if(_interface != nullptr){ 62 | _interface->pause(); 63 | } 64 | } 65 | void stop(){ 66 | if(_interface != nullptr){ 67 | _interface->stop(); 68 | } 69 | } 70 | private: 71 | IPlayControl* _interface; 72 | }; 73 | 74 | class PlayView : public IPlayControl{ 75 | public: 76 | void play() override{ 77 | printf("play\n"); 78 | } 79 | void pause() override{ 80 | printf("pause\n"); 81 | } 82 | void stop() override{ 83 | printf("stop\n"); 84 | } 85 | void setPlayControlView(IPlayControlView * interface){ 86 | interface->setPlayControl(this); 87 | } 88 | }; 89 | 90 | 91 
| 92 | 93 | int avclass_avoption_test(){ 94 | 95 | { 96 | PlayControlView* controlView = new PlayControlView(); 97 | PlayView* playView = new PlayView(); 98 | playView->setPlayControlView(controlView); 99 | controlView->start(); 100 | controlView->pause(); 101 | controlView->stop(); 102 | 103 | delete controlView; 104 | delete playView; 105 | } 106 | 107 | 108 | AVCLSTEST* ccc = (AVCLSTEST*)av_mallocz(sizeof(AVCLSTEST)); 109 | ccc->avcls = &g_avcls; 110 | ccc->kkk = 10; 111 | //根据option中的默认値给结构体赋值 112 | av_opt_set_defaults(ccc); 113 | printf("姓名:%s, 性别:%s, 所在学校:%s, 数学:%f,英语:%f\n", ccc->name, ccc->gender, ccc->school, ccc->math, ccc->lenglish); 114 | 115 | av_dict_set(&ccc->userinfo, "student_name", "菜菜", 0); 116 | av_dict_set(&ccc->userinfo, "student_gender", "女", 0); 117 | av_dict_set(&ccc->userinfo, "student_school", "上海大学", 0); 118 | av_dict_set(&ccc->userinfo, "student_age", "25", 0); 119 | 120 | av_dict_set(&ccc->score, "math", "99", 0); 121 | av_dict_set(&ccc->score, "lenglish", "90", 0); 122 | 123 | AVDictionaryEntry* t = nullptr; 124 | while(t = av_dict_get(ccc->userinfo, "", t, AV_DICT_IGNORE_SUFFIX)){ 125 | int ret = av_opt_set(ccc, t->key, t->value, AV_OPT_SEARCH_CHILDREN); 126 | if(ret == AVERROR_OPTION_NOT_FOUND){ 127 | printf("not find option :%s\n", t->key); 128 | } 129 | } 130 | 131 | av_opt_set_dict(ccc, &ccc->score); 132 | 133 | printf("赋值后:\n"); 134 | printf("姓名:%s, 性别:%s, 所在学校:%s, 数学:%f,英语:%f\n", ccc->name, ccc->gender, ccc->school, ccc->math, ccc->lenglish); 135 | 136 | if(ccc->userinfo){ 137 | av_dict_free(&ccc->userinfo); 138 | } 139 | if(ccc->score){ 140 | av_dict_free(&ccc->score); 141 | } 142 | av_opt_free(ccc); 143 | av_freep(&ccc); 144 | return 0; 145 | } 146 | 147 | #endif -------------------------------------------------------------------------------- /src/av_util_dictionary_test.h: -------------------------------------------------------------------------------- 1 | #ifndef AV_UTIL_DICTIONARY_TEST_H_H_ 2 | #define 
AV_UTIL_DICTIONARY_TEST_H_H_ 3 | 4 | #include "global.h" 5 | 6 | 7 | void av_dictionary_test(){ 8 | AVDictionary *dict = nullptr; 9 | //添加两个键值对 10 | av_dict_set(&dict, "name", "cccccc", 0); 11 | av_dict_set_int(&dict, "age", 28, 0); 12 | //获取字典中的键值对数量 13 | int count = av_dict_count(dict); 14 | printf("dict count:%d\n", count); 15 | //根据主键获取值 16 | AVDictionaryEntry* entry = av_dict_get(dict, "name", NULL, 0); 17 | printf("key:%s, value:%s\n", "name", entry->value); 18 | 19 | //for 遍历字典 20 | entry = nullptr; 21 | for(int i=0; ikey, entry->value); 24 | } 25 | 26 | //while遍历字典 27 | printf("while遍历:\n"); 28 | entry = nullptr; 29 | //最重要的是第三个参数和第四个参数, 当entry==nullptr时从字典开始位置获取,当entry!=nullptr则从entry指向的下个位置开始获取 30 | //AV_DICT_IGNORE_SUFFIX, 获取开始位置的第一个键值对 31 | while(entry = av_dict_get(dict, "", entry, AV_DICT_IGNORE_SUFFIX)){ 32 | printf("key:%s, value:%s\n", entry->key, entry->value); 33 | } 34 | //释放AVDictionary 35 | av_dict_free(&dict); 36 | } 37 | 38 | 39 | #endif -------------------------------------------------------------------------------- /src/avframe_util.cpp: -------------------------------------------------------------------------------- 1 | #include "avframe_util.h" 2 | 3 | extern "C"{ 4 | #include 5 | #include 6 | #include 7 | } 8 | 9 | //写入YUV420 10 | void WriteYUV420ToFile(const AVFrame *frame, FILE* f){ 11 | if(frame->linesize[0] != frame->width){ 12 | for(int i=0; iheight; i++){//写入Y数据 13 | //如width=351, frame->linesize[0] = 352, 涉及到数据对齐的问题, 14 | //所以不能直接使用frame->width*frame->height大小直接copy 15 | fwrite(frame->data[0] + i*frame->linesize[0], 1, frame->width, f); 16 | } 17 | }else{ 18 | fwrite(frame->data[0], 1, frame->width*frame->height, f); 19 | } 20 | if(frame->linesize[1] != frame->width / 2){ 21 | for(int i=0; iheight / 2; i++){//写入U数据 22 | //如width=351, 涉及到数据对齐的问题, 23 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 24 | fwrite(frame->data[1] + i*frame->linesize[1], 1, frame->width / 2, f); 25 | } 26 | }else{ 27 | fwrite(frame->data[1], 1, 
(frame->width*frame->height)>>2, f); 28 | } 29 | if(frame->linesize[2] != frame->width / 2){ 30 | for(int i=0; iheight / 2; i++){//写入V数据 31 | //如width=351, 涉及到数据对齐的问题, 32 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 33 | fwrite(frame->data[2] + i*frame->linesize[2], 1, frame->width / 2, f); 34 | } 35 | }else{ 36 | fwrite(frame->data[2], 1, (frame->width*frame->height)>>2, f); 37 | } 38 | } 39 | void WriteYUV420P10LEToFile(const AVFrame *frame, FILE* f){ 40 | if(frame->linesize[0] != frame->width * 2){ 41 | for(int i=0; iheight; i++){//写入Y数据 42 | //如width=351, frame->linesize[0] = 352, 涉及到数据对齐的问题, 43 | //所以不能直接使用frame->width*frame->height大小直接copy 44 | fwrite(frame->data[0] + i*frame->linesize[0], 2, frame->width, f); 45 | } 46 | }else{ 47 | fwrite(frame->data[0], 2, frame->width*frame->height, f); 48 | } 49 | if(frame->linesize[1] != frame->width){ 50 | for(int i=0; iheight / 2; i++){//写入U数据 51 | //如width=351, 涉及到数据对齐的问题, 52 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 53 | fwrite(frame->data[1] + i*frame->linesize[1], 2, frame->width / 2, f); 54 | } 55 | }else{ 56 | fwrite(frame->data[1], 2, (frame->width*frame->height)>>2, f); 57 | } 58 | if(frame->linesize[2] != frame->width){ 59 | for(int i=0; iheight / 2; i++){//写入V数据 60 | //如width=351, 涉及到数据对齐的问题, 61 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 62 | fwrite(frame->data[2] + i*frame->linesize[2], 2, frame->width / 2, f); 63 | } 64 | }else{ 65 | fwrite(frame->data[2], 2, (frame->width*frame->height)>>2, f); 66 | } 67 | } 68 | //读取YUV420 69 | void ReadYUV420FromFile(AVFrame *frame, FILE *f){ 70 | if(frame->linesize[0] != frame->width){ 71 | for(int i=0; iheight; i++){//写入Y数据 72 | //如width=351, frame->linesize[0] = 352, 涉及到数据对齐的问题, 73 | //所以不能直接使用frame->width*frame->height大小直接copy 74 | fread(frame->data[0] + i*frame->linesize[0], 1, frame->width, f); 75 | } 76 | }else{ 77 | fread(frame->data[0], 1, frame->width * frame->height , f); 78 | } 79 | if(frame->linesize[1] != frame->width / 2){ 
80 | for(int i=0; iheight / 2; i++){//写入U数据 81 | //如width=351, 涉及到数据对齐的问题, 82 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 83 | fread(frame->data[1] + i*frame->linesize[1], 1, frame->width / 2, f); 84 | } 85 | }else{ 86 | fread(frame->data[1], 1, (frame->width*frame->height)>>2, f); 87 | } 88 | if(frame->linesize[2] != frame->width / 2){ 89 | for(int i=0; iheight / 2; i++){//写入V数据 90 | //如width=351, 涉及到数据对齐的问题, 91 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 92 | fread(frame->data[2] + i*frame->linesize[2], 1, frame->width / 2, f); 93 | } 94 | }else{ 95 | fread(frame->data[2], 1, (frame->width*frame->height)>>2, f); 96 | } 97 | } 98 | 99 | void ReadYUV420P10LEFromFile(AVFrame *frame, FILE *f){ 100 | if(frame->linesize[0] != frame->width * 2){ 101 | for(int i=0; iheight; i++){//写入Y数据 102 | //如width=351, frame->linesize[0] = 352, 涉及到数据对齐的问题, 103 | //所以不能直接使用frame->width*frame->height大小直接copy 104 | fread(frame->data[0] + i*frame->linesize[0], 2, frame->width, f); 105 | } 106 | }else{ 107 | fread(frame->data[0], 2, frame->width * frame->height , f); 108 | } 109 | if(frame->linesize[1] != frame->width){ 110 | for(int i=0; iheight / 2; i++){//写入U数据 111 | //如width=351, 涉及到数据对齐的问题, 112 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 113 | fread(frame->data[1] + i*frame->linesize[1], 2, frame->width / 2, f); 114 | } 115 | }else{ 116 | fread(frame->data[1], 2, (frame->width*frame->height)>>2, f); 117 | } 118 | if(frame->linesize[2] != frame->width){ 119 | for(int i=0; iheight / 2; i++){//写入V数据 120 | //如width=351, 涉及到数据对齐的问题, 121 | //所以不能直接使用(frame->width*frame->height) >> 2大小直接copy 122 | fread(frame->data[2] + i*frame->linesize[2], 2, frame->width / 2, f); 123 | } 124 | }else{ 125 | fread(frame->data[2], 2, (frame->width*frame->height)>>2, f); 126 | } 127 | } 128 | 129 | 130 | // int linesize[AV_NUM_DATA_POINTERS]; 131 | 132 | // /** 133 | // * pointers to the data planes/channels. 134 | // * 135 | // * For video, this should simply point to data[]. 
136 | // * 137 | // * For planar audio, each channel has a separate data pointer, and 138 | // * linesize[0] contains the size of each channel buffer. 139 | // * For packed audio, there is just one data pointer, and linesize[0] 140 | // * contains the total size of the buffer for all channels. 141 | // * 142 | // * Note: Both data and extended_data should always be set in a valid frame, 143 | // * but for planar audio with more channels that can fit in data, 144 | // * extended_data must be used in order to access all channels. 145 | // */ 146 | // uint8_t **extended_data; 147 | 148 | //写入PCM,裸PCM文件存在格式:LRLRLR...... 149 | void WritePCMToFile(const AVFrame *frame, FILE* f){ 150 | //根据format得出每个sample所占的字节数,这里的字节数是指一个通道所占的字节数 151 | int datasize = av_get_bytes_per_sample((AVSampleFormat)frame->format); 152 | int planar = av_sample_fmt_is_planar((AVSampleFormat)frame->format); 153 | if(planar){ 154 | for(int i=0; inb_samples; i++){ //循环每个采样 155 | for(int j=0; jchannels; j++){//如果每个采样有多个通道,则data[0]为第一个通道,data[1]为第二个通道,以此类推 156 | fwrite(frame->extended_data[j] + i*datasize, datasize, 1, f); 157 | } 158 | } 159 | }else{ 160 | fwrite(frame->extended_data[0], datasize * frame->channels, frame->nb_samples, f); 161 | } 162 | 163 | } 164 | //读取PCM 165 | void ReadPCMFromFile(AVFrame* frame, FILE* f){ 166 | //根据format得出每个sample所占的字节数,这里的字节数是指一个通道所占的字节数 167 | int datasize = av_get_bytes_per_sample((AVSampleFormat)frame->format); 168 | int planar = av_sample_fmt_is_planar((AVSampleFormat)frame->format); 169 | if(planar){ 170 | for(int i=0; inb_samples; i++){//循环每个采样 171 | for(int j=0; jchannels; j++){//如果每个采样有多个通道,则data[0]为第一个通道,data[1]为第二个通道,以此类推 172 | fread(frame->extended_data[j] + i*datasize, datasize, 1, f); 173 | } 174 | } 175 | }else{ 176 | fread(frame->extended_data[0], datasize * frame->channels, frame->nb_samples, f); 177 | } 178 | } 179 | 180 | //横向合并YUV420,要求left的高度和right的高度相同 181 | AVFrame* YUV420HorizontalMerge(const AVFrame* left, const AVFrame* right){ 182 | 
if(left == NULL || right == NULL) 183 | return NULL; 184 | if(left->height != right->height) 185 | return NULL; 186 | AVFrame* destFrame = av_frame_alloc(); 187 | int dest_height = left->height; 188 | int dest_width = left->width+left->width; 189 | destFrame->format = AV_PIX_FMT_YUV420P; 190 | destFrame->width = dest_width; 191 | destFrame->height = dest_height; 192 | av_frame_get_buffer(destFrame, 32); 193 | 194 | for(int i=0; idata[0] + i*destFrame->linesize[0], left->data[0] + i * left->linesize[0] , left->width); 197 | //复制right一行Y 198 | memcpy(destFrame->data[0] + i*destFrame->linesize[0] + left->width, right->data[0] + i * right->linesize[0], right->width); 199 | } 200 | 201 | for(int j=0; jdata[1] + j*destFrame->linesize[1], left->data[1] + j * left->linesize[1], left->width/2); 204 | //复制right一行U 205 | memcpy(destFrame->data[1] + j*destFrame->linesize[1] + left->width/2, right->data[1] + j * right->linesize[1], right->width/2); 206 | 207 | //复制left一行V 208 | memcpy(destFrame->data[2] + j*destFrame->linesize[2], left->data[2] + j * left->linesize[2], left->width/2); 209 | //复制right一行V 210 | memcpy(destFrame->data[2] + j*destFrame->linesize[2] + left->width/2, right->data[2] + j * right->linesize[2], right->width/2); 211 | } 212 | return destFrame; 213 | } 214 | //纵向合并YUV420,要求up和down的宽度相同 215 | AVFrame* YUV420VerticalMerge(const AVFrame* up, const AVFrame* down){ 216 | if(up == NULL || down == NULL) 217 | return NULL; 218 | if(up->width != down->width) 219 | return NULL; 220 | AVFrame* destFrame = av_frame_alloc(); 221 | int dest_height = up->height + down->height; 222 | int dest_width = up->width; 223 | destFrame->format = AV_PIX_FMT_YUV420P; 224 | destFrame->width = dest_width; 225 | destFrame->height = dest_height; 226 | av_frame_get_buffer(destFrame, 32); 227 | 228 | {//复制Y 229 | int lines = 0; 230 | for(int i=0; iheight; i++){ 231 | //复制up一行Y 232 | memcpy(destFrame->data[0] + (lines++)*destFrame->linesize[0], up->data[0] + i * up->linesize[0] , 
up->width); 233 | } 234 | for(int j=0; jheight; j++){ 235 | //复制down一行Y 236 | memcpy(destFrame->data[0] + (lines++)*destFrame->linesize[0], down->data[0] + j * down->linesize[0] , down->width); 237 | } 238 | } 239 | 240 | {//复制UV 241 | int lines = 0; 242 | for(int i=0; iheight/2; i++){ 243 | //复制up一行U 244 | memcpy(destFrame->data[1] + lines*destFrame->linesize[1], up->data[1] + i * up->linesize[1] , up->width / 2); 245 | //复制up一行V 246 | memcpy(destFrame->data[2] + lines*destFrame->linesize[2], up->data[2] + i * up->linesize[2] , up->width / 2); 247 | 248 | lines++; 249 | } 250 | for(int j=0; jheight/2; j++){ 251 | //复制down一行U 252 | memcpy(destFrame->data[1] + lines*destFrame->linesize[1], down->data[1] + j * down->linesize[1] , down->width/2); 253 | //复制down一行V 254 | memcpy(destFrame->data[2] + lines*destFrame->linesize[2], down->data[2] + j * down->linesize[2] , down->width/2); 255 | 256 | lines++; 257 | } 258 | } 259 | return destFrame; 260 | } 261 | -------------------------------------------------------------------------------- /src/avframe_util.h: -------------------------------------------------------------------------------- 1 | #ifndef _AVFRAME_UTIL_H_H_ 2 | #define _AVFRAME_UTIL_H_H_ 3 | 4 | #include 5 | 6 | struct AVFrame; 7 | //写入YUV420 8 | void WriteYUV420ToFile(const AVFrame *frame, FILE* f); 9 | void WriteYUV420P10LEToFile(const AVFrame *frame, FILE* f); 10 | //读取YUV420 11 | void ReadYUV420FromFile(AVFrame *frame, FILE *f); 12 | void ReadYUV420P10LEFromFile(AVFrame *frame, FILE *f); 13 | //写入PCM 14 | void WritePCMToFile(const AVFrame *frame, FILE* f); 15 | //读取PCM 16 | void ReadPCMFromFile(AVFrame* frame, FILE* f); 17 | 18 | //横向合并YUV420,要求left的高度和right的高度相同 19 | AVFrame* YUV420HorizontalMerge(const AVFrame* left, const AVFrame* right); 20 | //纵向合并YUV420,要求up和down的宽度相同 21 | AVFrame* YUV420VerticalMerge(const AVFrame* up, const AVFrame* down); 22 | 23 | #endif -------------------------------------------------------------------------------- 
/src/codecimpl.cpp: -------------------------------------------------------------------------------- 1 | #include "codecimpl.h" 2 | extern "C"{ 3 | #include 4 | } 5 | 6 | int encode(AVCodecContext *ctx, const AVFrame *frame, std::shared_ptr callback){ 7 | auto onSuccess = std::bind(&EncodeCallback::OnSuccess, callback, std::placeholders::_1, std::placeholders::_2); 8 | return encode(ctx, frame, onSuccess); 9 | } 10 | 11 | int encode(AVCodecContext *ctx, const AVFrame *frame, OnEncodeSuccess onSucess){ 12 | int ret; 13 | ret = avcodec_send_frame(ctx, frame); 14 | if(ret < 0){ 15 | return -1; 16 | } 17 | AVPacket *pkt = av_packet_alloc(); 18 | int result = 0; 19 | while(ret >=0){ 20 | ret = avcodec_receive_packet(ctx, pkt); 21 | if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){ 22 | break; 23 | }else if(ret < 0){ 24 | result = -1; 25 | break; 26 | }else{ 27 | if(onSucess!=nullptr) 28 | onSucess(ctx, pkt); 29 | av_packet_unref(pkt); 30 | } 31 | } 32 | av_packet_free(&pkt); 33 | return result; 34 | } 35 | 36 | int decode(AVCodecContext *ctx, const AVPacket* packet, std::shared_ptr callback){ 37 | auto onSuccess = std::bind(&DecodeCallback::OnSuccess, callback, std::placeholders::_1, std::placeholders::_2); 38 | return decode(ctx, packet, onSuccess); 39 | } 40 | 41 | int decode(AVCodecContext *ctx, const AVPacket* packet, OnDecodeSuccess onSucess){ 42 | int ret = avcodec_send_packet(ctx, packet); 43 | if (ret < 0) { 44 | return -1; 45 | } 46 | AVFrame* frame = av_frame_alloc(); 47 | int result = 0; 48 | while(ret >= 0){ 49 | ret = avcodec_receive_frame(ctx, frame); 50 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){ 51 | break; 52 | }else if (ret < 0) { 53 | result = -1; 54 | break; 55 | }else{ 56 | if(onSucess!=nullptr) 57 | onSucess(ctx, frame); 58 | av_frame_unref(frame); 59 | } 60 | } 61 | av_frame_free(&frame); 62 | return result; 63 | } -------------------------------------------------------------------------------- /src/codecimpl.h: 
-------------------------------------------------------------------------------- 1 | #ifndef CODEC_IMPL_H_H_ 2 | #define CODEC_IMPL_H_H_ 3 | 4 | #include 5 | #include 6 | 7 | struct AVCodecContext; 8 | struct AVFrame; 9 | struct AVPacket; 10 | 11 | //编码回调接口 12 | class EncodeCallback{ 13 | public: 14 | /** 15 | * 编码成功回调函数 16 | * ctx: 编码ctx 17 | * avpkt: 编码后的数据结构体 18 | */ 19 | virtual void OnSuccess(AVCodecContext *ctx, const AVPacket* avpkt) = 0; 20 | }; 21 | 22 | class DecodeCallback{ 23 | public: 24 | /** 25 | * 解码成功回调函数 26 | * ctx: 解码ctx 27 | * frame: 解码后的数据结构体 28 | */ 29 | virtual void OnSuccess(AVCodecContext *ctx, const AVFrame* frame) = 0; 30 | }; 31 | 32 | //编码成功回调函数 33 | using OnEncodeSuccess = std::function; 34 | 35 | //解码成功回调函数 36 | using OnDecodeSuccess = std::function; 37 | 38 | /** 39 | * 编码实现函数 40 | * ctx: 编码ctx 41 | * frame: 需要编码的数据 42 | * callback: 回调接口 43 | * return: 成功:0, 失败:非0 44 | */ 45 | int encode(AVCodecContext *ctx, const AVFrame *frame, std::shared_ptr callback); 46 | 47 | /** 48 | * 编码实现函数 49 | * ctx: 编码ctx 50 | * frame: 需要编码的数据 51 | * onSucess: 成功回调函数 52 | * return: 成功:0, 失败:非0 53 | */ 54 | int encode(AVCodecContext *ctx, const AVFrame *frame, OnEncodeSuccess onSucess); 55 | /** 56 | * 解码实现函数 57 | * ctx: 解码ctx 58 | * packet: 需要解码的数据 59 | * callback: 回调接口 60 | * return: 成功:0, 失败:非0 61 | */ 62 | int decode(AVCodecContext *ctx, const AVPacket* packet, std::shared_ptr callback); 63 | 64 | /** 65 | * 解码实现函数 66 | * ctx: 解码ctx 67 | * packet: 需要解码的数据 68 | * onSucess: 成功回调函数 69 | * return: 成功:0, 失败:非0 70 | */ 71 | int decode(AVCodecContext *ctx, const AVPacket* packet, OnDecodeSuccess onSucess); 72 | 73 | #endif -------------------------------------------------------------------------------- /src/cut_mp4_test.h: -------------------------------------------------------------------------------- 1 | #ifndef CUT_MP4_TEST_H_H_ 2 | #define CUT_MP4_TEST_H_H_ 3 | 4 | 5 | #include "global.h" 6 | 7 | //判断是否需要继续读取packet 8 | static inline bool 
isEndReadPacket(const uint8_t* stream_exit, const AVFormatContext* in_avformat_ctx){ 9 | bool exit = true; 10 | for(int i=0; inb_streams; i++){ 11 | if(stream_exit[i]==0){ 12 | exit = false; 13 | break; 14 | } 15 | } 16 | return exit; 17 | } 18 | 19 | /* 20 | 裁剪多媒体文件 21 | @inputfile: 输入文件 22 | @outputfile: 输出文件 23 | @starttime: 开始时间 24 | @endtime: 结束时间 25 | */ 26 | int cut_media_file2(const char* inputfile, const char* outputfile, float starttime, float endtime){ 27 | int ret = -1; 28 | AVFormatContext* in_avformat_ctx = nullptr; 29 | AVFormatContext* out_avformat_ctx = nullptr; 30 | AVPacket * pkt; 31 | //计算时间差间隔 32 | int64_t *pre_dts; 33 | int64_t *pre_pts; 34 | //计算存储当前时间 35 | int64_t *cur_dts; 36 | int64_t *cur_pts; 37 | uint8_t *stream_exit; 38 | //视频进行解码再编码 39 | AVCodecContext *vDecContext = nullptr; 40 | AVCodecContext *vEncContext = nullptr; 41 | int video_stream_index = -1; 42 | AVFrame* inframe; 43 | static int pts = 0; 44 | auto flush = [&](){ 45 | encode(vEncContext, nullptr, [=](AVCodecContext* avc, const AVPacket* avpacket){ 46 | AVPacket* newpkt = av_packet_clone(avpacket); 47 | av_packet_rescale_ts(newpkt, vEncContext->time_base, out_avformat_ctx->streams[video_stream_index]->time_base); 48 | newpkt->stream_index = video_stream_index; 49 | int ret = av_interleaved_write_frame(out_avformat_ctx, newpkt); 50 | av_packet_free(&newpkt); 51 | if (ret < 0) { 52 | fprintf(stderr, "av_interleaved_write_frame error\n"); 53 | } 54 | }); 55 | }; 56 | 57 | bool isNeedFlush = false; 58 | bool hasIFrame = false; 59 | 60 | //带goto语言的变量都需要提前定义如下 61 | if(avformat_open_input(&in_avformat_ctx, inputfile, nullptr, nullptr) < 0){ 62 | goto end; 63 | } 64 | if(avformat_find_stream_info(in_avformat_ctx, nullptr)){ 65 | goto end; 66 | } 67 | if(avformat_alloc_output_context2(&out_avformat_ctx, nullptr, nullptr, outputfile) < 0){ 68 | goto end; 69 | } 70 | 71 | for(int i=0; inb_streams; i++){ 72 | AVStream* stream = in_avformat_ctx->streams[i]; 73 | AVStream* out = 
avformat_new_stream(out_avformat_ctx, nullptr); 74 | if(stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){ 75 | const AVCodec* decCodec = avcodec_find_decoder(stream->codecpar->codec_id); 76 | vDecContext = avcodec_alloc_context3(decCodec); 77 | avcodec_parameters_to_context(vDecContext, stream->codecpar); 78 | if(avcodec_open2(vDecContext, decCodec, nullptr)<0){ 79 | goto end; 80 | } 81 | 82 | const AVCodec* encCodec = avcodec_find_encoder(stream->codecpar->codec_id); 83 | vEncContext = avcodec_alloc_context3(encCodec); 84 | avcodec_parameters_to_context(vEncContext, stream->codecpar); 85 | //根据解码参数配置编码参数,如果能有办法从AVStream获取所有编码参数最好 86 | //那样就可以在碰到下一个关键帧直接数据copy 87 | { 88 | vEncContext->framerate = stream->r_frame_rate; 89 | vEncContext->time_base = (AVRational){vEncContext->framerate.den, vEncContext->framerate.num}; 90 | 91 | vEncContext->gop_size = vDecContext->gop_size; 92 | vEncContext->has_b_frames = vDecContext->has_b_frames; 93 | vEncContext->max_b_frames = vDecContext->max_b_frames; 94 | vEncContext->pix_fmt = vDecContext->pix_fmt; 95 | } 96 | if(avcodec_open2(vEncContext, encCodec, nullptr)<0){ 97 | goto end; 98 | } 99 | avcodec_parameters_from_context(out->codecpar, vEncContext); 100 | out->codecpar->codec_tag = 0; 101 | video_stream_index = i; 102 | 103 | }else{ 104 | avcodec_parameters_copy(out->codecpar, stream->codecpar); 105 | //下边这个参数很重要,如果不重置这个参数会出现类似于Tag avc1 incompatible with output codec id '27' ([7][0][0][0])这个的错误 106 | out->codecpar->codec_tag = 0; 107 | } 108 | } 109 | 110 | 111 | if (!(out_avformat_ctx->oformat->flags & AVFMT_NOFILE)) { 112 | ret = avio_open(&out_avformat_ctx->pb, outputfile, AVIO_FLAG_WRITE); 113 | if (ret < 0) { 114 | fprintf(stderr, "Could not open output file '%s'", outputfile); 115 | goto end; 116 | } 117 | } 118 | 119 | if(avformat_write_header(out_avformat_ctx, nullptr) < 0){ 120 | goto end; 121 | } 122 | 123 | 124 | //跳转到指定帧 125 | ret = av_seek_frame(in_avformat_ctx, -1, starttime * AV_TIME_BASE, 
AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_FRAME); 126 | if (ret < 0) { 127 | fprintf(stderr, "Error seek\n"); 128 | goto end; 129 | } 130 | 131 | // 根据流数量申请空间,并全部初始化为0 132 | pre_dts = (int64_t*)malloc(sizeof(int64_t) * in_avformat_ctx->nb_streams); 133 | memset(pre_dts, 0, sizeof(int64_t) * in_avformat_ctx->nb_streams); 134 | 135 | pre_pts = (int64_t*)malloc(sizeof(int64_t) * in_avformat_ctx->nb_streams); 136 | memset(pre_pts, 0, sizeof(int64_t) * in_avformat_ctx->nb_streams); 137 | 138 | stream_exit = (uint8_t*)malloc(sizeof(uint8_t)*in_avformat_ctx->nb_streams); 139 | memset(stream_exit, 0, sizeof(uint8_t) * in_avformat_ctx->nb_streams); 140 | 141 | pkt = av_packet_alloc(); 142 | inframe = av_frame_alloc(); 143 | 144 | { 145 | while(!isEndReadPacket(stream_exit, in_avformat_ctx)){ 146 | ret = av_read_frame(in_avformat_ctx, pkt); 147 | if(ret!=0){ 148 | printf("read error or file end\n"); 149 | break; 150 | } 151 | AVStream *in_stream = in_avformat_ctx->streams[pkt->stream_index]; 152 | AVStream *out_stream = out_avformat_ctx->streams[pkt->stream_index]; 153 | // 时间超过要截取的时间,就退出循环 154 | if (av_q2d(in_stream->time_base) * pkt->pts > endtime) { 155 | stream_exit[pkt->stream_index] = 1; 156 | av_packet_unref(pkt); 157 | continue; 158 | } 159 | //视频进行二次编码 160 | if(video_stream_index == pkt->stream_index){ 161 | bool b = av_q2d(in_stream->time_base) * pkt->pts >= starttime; 162 | decode(vDecContext, pkt, [&](AVCodecContext* avctx, const AVFrame* frame){ 163 | if(b){ 164 | inframe->format = AV_PIX_FMT_YUV420P; 165 | inframe->width = frame->width; 166 | inframe->height = frame->height; 167 | av_frame_get_buffer(inframe, 32); 168 | av_image_copy(inframe->data, inframe->linesize, (const uint8_t**)frame->data, frame->linesize, AV_PIX_FMT_YUV420P,inframe->width, inframe->height); 169 | 170 | inframe->pts = pts++; 171 | inframe->pict_type = AV_PICTURE_TYPE_NONE; 172 | isNeedFlush = true; 173 | encode(vEncContext, inframe, [=](AVCodecContext* avc, const AVPacket* avpacket){ 174 | 175 
| AVPacket* newpkt = av_packet_clone(avpacket); 176 | av_packet_rescale_ts(newpkt, vEncContext->time_base, out_stream->time_base); 177 | newpkt->stream_index = video_stream_index; 178 | //printf("before rescale_ts: pts:%lld, dts:%lld, after rescale_ts:pts:%lld, dts:%lld\n", avpacket->pts, avpacket->dts, newpkt->pts, newpkt->dts); 179 | int ret = av_interleaved_write_frame(out_avformat_ctx, newpkt); 180 | av_packet_free(&newpkt); 181 | if (ret < 0) { 182 | fprintf(stderr, "Error muxing packet\n"); 183 | } 184 | }); 185 | av_frame_unref(inframe); 186 | } 187 | }); 188 | }else{ 189 | if(av_q2d(in_stream->time_base) * pkt->pts >= starttime){ 190 | // 将截取后的每个流的起始dts 、pts保存下来,作为开始时间,用来做后面的时间基转换 191 | if (pre_dts[pkt->stream_index] == 0) { 192 | pre_dts[pkt->stream_index] = pkt->dts; 193 | } 194 | if (pre_pts[pkt->stream_index] == 0) { 195 | pre_pts[pkt->stream_index] = pkt->pts; 196 | } 197 | 198 | // 时间基转换 199 | pkt->pts = av_rescale_q_rnd(pkt->pts - pre_pts[pkt->stream_index], in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); 200 | pkt->dts = av_rescale_q_rnd(pkt->dts - pre_dts[pkt->stream_index], in_stream->time_base,out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); 201 | 202 | //一帧视频播放时间必须在解码时间点之后,当出现pkt->pts < pkt->dts时会导致程序异常,所以我们丢掉有问题的帧,不会有太大影响。 203 | if (pkt->pts < pkt->dts) { 204 | continue; 205 | } 206 | ret = av_interleaved_write_frame(out_avformat_ctx, pkt); 207 | if (ret < 0) { 208 | fprintf(stderr, "Error muxing packet\n"); 209 | break; 210 | } 211 | 212 | } 213 | } 214 | av_packet_unref(pkt); 215 | } 216 | } 217 | //二次编码 218 | if(isNeedFlush) 219 | flush(); 220 | 221 | free(pre_dts); 222 | free(pre_pts); 223 | free(stream_exit); 224 | 225 | av_write_trailer(out_avformat_ctx); 226 | 227 | av_packet_free(&pkt); 228 | av_frame_free(&inframe); 229 | 230 | ret = 0; 231 | end: 232 | avcodec_close(vDecContext); 233 | avcodec_close(vEncContext); 234 | 235 | 
avformat_close_input(&in_avformat_ctx); 236 | 237 | /* close output */ 238 | if (out_avformat_ctx && !(out_avformat_ctx->flags & AVFMT_NOFILE)) 239 | avio_closep(&out_avformat_ctx->pb); 240 | avformat_free_context(out_avformat_ctx); 241 | 242 | return ret; 243 | } 244 | 245 | /* 246 | 裁剪多媒体文件 247 | @inputfile: 输入文件 248 | @outputfile: 输出文件 249 | @starttime: 开始时间 250 | @time: 时长 251 | */ 252 | int cut_media_file(const char* inputfile, const char* outputfile, float starttime, float time){ 253 | return cut_media_file2(inputfile, outputfile, starttime, starttime+time); 254 | } 255 | 256 | int cut_media_file_test(){ 257 | const char* inputfile = "./半壶纱.mp4"; 258 | const char* outputfile = "./cutfile.mp4"; 259 | float starttime = 5; 260 | float time = 15; 261 | return cut_media_file(inputfile, outputfile, starttime, time); 262 | } 263 | 264 | 265 | #endif -------------------------------------------------------------------------------- /src/decode_audio_output_pcm_test.h: -------------------------------------------------------------------------------- 1 | #ifndef DECODE_AUDIO_OUTPUT_PCM_TEST_H_H_ 2 | #define DECODE_AUDIO_OUTPUT_PCM_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include "codecimpl.h" 6 | #include 7 | #include "audio_convert_tool.h" 8 | #include "avframe_util.h" 9 | 10 | //解码音频并将PCM数据写入文件 11 | int decode_audio_output_pcm_test(){ 12 | const char * inFileName = "./半壶纱.mp4"; 13 | AVFormatContext * avformatctx = nullptr; 14 | if(avformat_open_input(&avformatctx, inFileName, nullptr, nullptr)<0){ 15 | return -1; 16 | } 17 | int audio_stream_index = av_find_best_stream(avformatctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); 18 | if(audio_stream_index < 0){ 19 | return -1; 20 | } 21 | AVStream* stream = avformatctx->streams[audio_stream_index]; 22 | const AVCodec* pCodec = avcodec_find_decoder(stream->codecpar->codec_id); 23 | AVCodecContext *audioContext = avcodec_alloc_context3(pCodec); 24 | avcodec_parameters_to_context(audioContext, stream->codecpar); 25 | 26 | 
if(avcodec_open2(audioContext, pCodec, nullptr)<0){ 27 | return -1; 28 | } 29 | //原始PCM格式 30 | FILE* pcmFile = fopen("./t_44100_2_float.pcm", "wb"); 31 | FILE* convertPcmFile = fopen("./t_16000_1_s16.pcm", "wb"); 32 | 33 | std::shared_ptr swrCtxManager = std::make_shared(audioContext->channel_layout, audioContext->sample_rate, 34 | (AVSampleFormat)audioContext->sample_fmt, AV_CH_LAYOUT_MONO, 16000, AV_SAMPLE_FMT_S16); 35 | if(!swrCtxManager->Init()){ 36 | return -1; 37 | } 38 | 39 | AVFrame* outframe = av_frame_alloc(); 40 | 41 | auto initFrameWidthData = [&](uint64_t channel_layout, int format, int sample_rate, int frame_size, void** frame_data){ 42 | outframe->nb_samples = frame_size; 43 | outframe->channel_layout = channel_layout; 44 | outframe->format = format; 45 | outframe->sample_rate = sample_rate; 46 | av_frame_get_buffer(outframe, 0);//这个函数会根据channel_layout初始化channel(av_get_channel_layout_nb_channels) 47 | int datasize = av_get_bytes_per_sample((AVSampleFormat)outframe->format); 48 | for(int i=0; inb_samples; i++){ 49 | for(int j=0; jchannels; j++){ 50 | memcpy(outframe->data[j] + i*datasize, frame_data[j]+ i*datasize, datasize); 51 | } 52 | } 53 | }; 54 | 55 | auto callback = [&](AVCodecContext *ctx, const AVFrame* frame){ 56 | WritePCMToFile(frame, pcmFile); 57 | int ret = swrCtxManager->Convert((const uint8_t**)frame->extended_data, frame->nb_samples); 58 | if(ret > 0){ 59 | initFrameWidthData(AV_CH_LAYOUT_MONO, AV_SAMPLE_FMT_S16, 16000, ret, (void**)swrCtxManager->GetConvertedBuffer()); 60 | WritePCMToFile(outframe, convertPcmFile); 61 | av_frame_unref(outframe); 62 | } 63 | }; 64 | AVPacket *packet = av_packet_alloc(); 65 | av_init_packet(packet); 66 | while(1){ 67 | int ret = av_read_frame(avformatctx, packet); 68 | if(ret!=0){ 69 | printf("read error or file end\n"); 70 | break; 71 | } 72 | if(packet->stream_index==audio_stream_index){ 73 | decode(audioContext, packet, callback); 74 | } 75 | 76 | av_packet_unref(packet); 77 | } 78 | 79 | 
decode(audioContext, nullptr, callback); 80 | 81 | avcodec_close(audioContext); 82 | 83 | avformat_close_input(&avformatctx); 84 | 85 | av_packet_free(&packet); 86 | 87 | av_frame_free(&outframe); 88 | 89 | fclose(pcmFile); 90 | 91 | return 0; 92 | } 93 | 94 | 95 | #endif -------------------------------------------------------------------------------- /src/decode_h264_test.h: -------------------------------------------------------------------------------- 1 | #ifndef DECODEC_H264_TEST_H_H_ 2 | #define DECODEC_H264_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "avframe_util.h" 11 | #include "codecimpl.h" 12 | 13 | 14 | #define INBUF_SIZE 4096 15 | 16 | int decode_h264_test(){ 17 | const char* iFileName = "test.h264"; 18 | FILE* ifile = fopen(iFileName, "rb"); 19 | if(ifile == NULL) 20 | return -1; 21 | const char* oFileName = "hello.yuv"; 22 | FILE* ofile = fopen(oFileName, "wb"); 23 | if(ifile == NULL) 24 | ofile -1; 25 | AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264); 26 | AVCodecContext* avcodecCtx = avcodec_alloc_context3(codec); 27 | if(avcodec_open2(avcodecCtx, codec, nullptr) < 0){ 28 | return -1; 29 | } 30 | AVCodecParserContext *parser = av_parser_init(codec->id); 31 | if (!parser) { 32 | fprintf(stderr, "parser not found\n"); 33 | return -1; 34 | } 35 | AVFrame* frame = av_frame_alloc(); 36 | if (!frame) { 37 | fprintf(stderr, "Could not allocate video frame\n"); 38 | return -1; 39 | } 40 | 41 | uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; 42 | uint8_t *data; 43 | size_t data_size; 44 | int ret; 45 | AVPacket *pkt= av_packet_alloc(); 46 | av_init_packet(pkt); 47 | 48 | auto callback = [&](AVCodecContext *ctx, const AVFrame* frame){ 49 | WriteYUV420ToFile(frame, ofile); 50 | printf("解码成功\n"); 51 | }; 52 | 53 | while (!feof(ifile)) { 54 | /* read raw data from the input file */ 55 | data_size = fread(inbuf, 1, INBUF_SIZE, ifile); 56 | if (!data_size) 57 | break; 58 | 
59 | /* use the parser to split the data into frames */ 60 | data = inbuf; 61 | while (data_size > 0) { 62 | ret = av_parser_parse2(parser, avcodecCtx, &pkt->data, &pkt->size, 63 | data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0); 64 | if (ret < 0) { 65 | fprintf(stderr, "Error while parsing\n"); 66 | exit(1); 67 | } 68 | data += ret; 69 | data_size -= ret; 70 | 71 | if (pkt->size) 72 | decode(avcodecCtx, pkt, callback); 73 | } 74 | } 75 | 76 | /* flush the decoder */ 77 | decode(avcodecCtx, nullptr, callback); 78 | 79 | fclose(ifile); 80 | fclose(ofile); 81 | 82 | av_parser_close(parser); 83 | avcodec_free_context(&avcodecCtx); 84 | av_frame_free(&frame); 85 | av_packet_free(&pkt); 86 | } 87 | 88 | #endif -------------------------------------------------------------------------------- /src/decode_h265_test.h: -------------------------------------------------------------------------------- 1 | #ifndef DECODEC_H265_TEST_H_H_ 2 | #define DECODEC_H265_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "avframe_util.h" 11 | #include "codecimpl.h" 12 | 13 | 14 | #define INBUF_SIZE 4096 15 | 16 | int decode_h265_test(){ 17 | const char* iFileName = "test2.h265"; 18 | FILE* ifile = fopen(iFileName, "rb"); 19 | if(ifile == NULL) 20 | return -1; 21 | const char* oFileName = "output.yuv"; 22 | FILE* ofile = fopen(oFileName, "wb"); 23 | if(ifile == NULL) 24 | return -1; 25 | AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H265); 26 | AVCodecContext* avcodecCtx = avcodec_alloc_context3(codec); 27 | if(avcodec_open2(avcodecCtx, codec, nullptr) < 0){ 28 | return -1; 29 | } 30 | AVCodecParserContext *parser = av_parser_init(codec->id); 31 | if (!parser) { 32 | fprintf(stderr, "parser not found\n"); 33 | return -1; 34 | } 35 | AVFrame* frame = av_frame_alloc(); 36 | if (!frame) { 37 | fprintf(stderr, "Could not allocate video frame\n"); 38 | return -1; 39 | } 40 | 41 | uint8_t inbuf[INBUF_SIZE + 
AV_INPUT_BUFFER_PADDING_SIZE]; 42 | uint8_t *data; 43 | size_t data_size; 44 | int ret; 45 | AVPacket *pkt= av_packet_alloc(); 46 | av_init_packet(pkt); 47 | 48 | auto callback = [&](AVCodecContext *ctx, const AVFrame* frame){ 49 | WriteYUV420P10LEToFile(frame, ofile); 50 | printf("解码成功\n"); 51 | }; 52 | 53 | while (!feof(ifile)) { 54 | /* read raw data from the input file */ 55 | data_size = fread(inbuf, 1, INBUF_SIZE, ifile); 56 | if (!data_size) 57 | break; 58 | 59 | /* use the parser to split the data into frames */ 60 | data = inbuf; 61 | while (data_size > 0) { 62 | ret = av_parser_parse2(parser, avcodecCtx, &pkt->data, &pkt->size, 63 | data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0); 64 | if (ret < 0) { 65 | fprintf(stderr, "Error while parsing\n"); 66 | exit(1); 67 | } 68 | data += ret; 69 | data_size -= ret; 70 | 71 | if (pkt->size) 72 | decode(avcodecCtx, pkt, callback); 73 | } 74 | } 75 | 76 | /* flush the decoder */ 77 | decode(avcodecCtx, nullptr, callback); 78 | 79 | fclose(ifile); 80 | fclose(ofile); 81 | 82 | av_parser_close(parser); 83 | avcodec_free_context(&avcodecCtx); 84 | av_frame_free(&frame); 85 | av_packet_free(&pkt); 86 | } 87 | 88 | #endif -------------------------------------------------------------------------------- /src/decode_heic_output_yuv420_test.h: -------------------------------------------------------------------------------- 1 | #ifndef DECODE_HEIC_OUTPUT_YUV420_TEST 2 | #define DECODE_HEIC_OUTPUT_YUV420_TEST 3 | #include "global.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "avframe_util.h" 10 | #include "codecimpl.h" 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | using namespace HEIFPP; 20 | 21 | void decode_heic_output_yuv420() 22 | { 23 | 24 | 25 | Heif *readHeif = new Heif(); 26 | // const char* heif_file_name = "./test2.heif"; 27 | // const char* heif_file_name = "./example.heic"; 28 | // const char *heif_file_name = 
"pw_2020_05_13_16_20_42_707_cloud_process.heic"; 29 | const char *heif_file_name = "1233.heic"; 30 | if (readHeif->load(heif_file_name) != Result::OK) 31 | { 32 | return; 33 | } 34 | 35 | const HEIF::FourCC majorBrand = readHeif->getMajorBrand(); 36 | uint32_t compatatibleBrand = readHeif->compatibleBrands(); 37 | for (int i = 0; i < compatatibleBrand; i++) 38 | { 39 | const HEIF::FourCC comBrand = readHeif->getCompatibleBrand(i); 40 | printf("%s\n", comBrand.value); 41 | } 42 | 43 | uint32_t itemCount = readHeif->getItemCount(); 44 | uint8_t buffer[128] = {0}; 45 | for (int i = 0; i < itemCount; i++) 46 | { 47 | Item *item = readHeif->getItem(i); 48 | if (item->isExifItem()) 49 | { 50 | ExifItem *exifItem = (ExifItem *)item; 51 | uint64_t datasize = exifItem->getDataSize(); 52 | const uint8_t *data = exifItem->getData(); 53 | memcpy(buffer, data, datasize); 54 | } 55 | else if (item->isXMPItem()) 56 | { 57 | XMPItem *xmpItem = (XMPItem *)item; 58 | std::string str((const char *)xmpItem->getData(), xmpItem->getDataSize()); 59 | Exiv2::XmpData xmpData; 60 | if (0 != Exiv2::XmpParser::decode(xmpData, str)) 61 | { 62 | throw Exiv2::Error(Exiv2::kerErrorMessage, "Failed to decode XMP data"); 63 | } 64 | for (auto iter = xmpData.begin(); iter != xmpData.end(); iter++) 65 | { 66 | std::string key = iter->key(); 67 | const Exiv2::Value &value = iter->value(); 68 | } 69 | bool trueCut = false; 70 | if (xmpData.findKey(Exiv2::XmpKey("Xmp.dc.TrueCut")) != xmpData.end()) 71 | { 72 | trueCut = xmpData["Xmp.dc.TrueCut"].value().toLong() == 1 ? 
true : false; 73 | } 74 | const Exiv2::Value &corporation_value = xmpData["Xmp.dc.corporation"].value(); 75 | auto iter = xmpData.findKey(Exiv2::XmpKey("Xmp.dc.xxx")); 76 | if (iter != xmpData.end()) 77 | { 78 | const Exiv2::Value &xxx_value = xmpData["Xmp.dc.xxx"].value(); 79 | } 80 | std::string corporation = corporation_value.toString(); 81 | std::cout << corporation << std::endl; 82 | } 83 | } 84 | 85 | uint32_t imageCount = readHeif->getImageCount(); 86 | for (int i = 0; i < imageCount; i++) 87 | { 88 | ImageItem *imageItem = readHeif->getImage(i); 89 | int width = imageItem->width(); 90 | int height = imageItem->height(); 91 | 92 | printf("====ImageItem: name:%s, w:%d, h:%d, isThumb:%d, address:%p\n", imageItem->getName().c_str(), width, height, imageItem->isThumbnailImage() ? 1 : 0, imageItem); 93 | } 94 | 95 | HEVCCodedImageItem *primaryItem = (HEVCCodedImageItem *)readHeif->getPrimaryItem(); 96 | uint32_t propertyCount = readHeif->getPropertyCount(); 97 | for (int i = 0; i < propertyCount; i++) 98 | { 99 | ItemProperty *property = readHeif->getProperty(i); 100 | } 101 | 102 | uint32_t metadataCount = primaryItem->getMetadataCount(); 103 | for (int i = 0; i < metadataCount; i++) 104 | { 105 | MetaItem *mataItem = primaryItem->getMetadata(i); 106 | } 107 | 108 | uint32_t thumbnailsCount = primaryItem->getThumbnailCount(); 109 | for (int i = 0; i < thumbnailsCount; i++) 110 | { 111 | ImageItem *thumb = primaryItem->getThumbnail(i); 112 | uint32_t width = thumb->width(); 113 | uint32_t height = thumb->height(); 114 | printf("====thumb :w:%d, h:%d, address:%p\n", width, height, thumb); 115 | 116 | if (thumb->isCodedImage()) 117 | { 118 | CodedImageItem *codeImageItem = (CodedImageItem *)thumb; 119 | HEIF::MediaFormat format = codeImageItem->getMediaFormat(); 120 | HEIF::FourCC fourCC = codeImageItem->getDecoderCodeType(); 121 | if (format == HEIF::MediaFormat::HEVC) 122 | { 123 | HEVCCodedImageItem *hevcCodedImageItem = (HEVCCodedImageItem *)codeImageItem; 124 
| DecoderConfig *decoderConfig = hevcCodedImageItem->getDecoderConfiguration(); 125 | } 126 | } 127 | } 128 | 129 | int width = primaryItem->width(); 130 | int height = primaryItem->height(); 131 | DecoderConfig *readerDecoderConfig = primaryItem->getDecoderConfiguration(); 132 | uint32_t vps_pps_sps_size; 133 | uint8_t *vps_pps_sps; 134 | readerDecoderConfig->getConfig(vps_pps_sps, vps_pps_sps_size); 135 | const unsigned char *imageData = primaryItem->getItemData(); 136 | uint64_t datasize = primaryItem->getItemDataSize(); 137 | 138 | AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H265); 139 | AVCodecContext *avcodecCtx = avcodec_alloc_context3(codec); 140 | 141 | avcodecCtx->extradata_size = vps_pps_sps_size; 142 | avcodecCtx->extradata = (uint8_t *)malloc(avcodecCtx->extradata_size); 143 | memcpy(avcodecCtx->extradata, vps_pps_sps, avcodecCtx->extradata_size); 144 | 145 | if (avcodec_open2(avcodecCtx, codec, nullptr) < 0) 146 | { 147 | return; 148 | } 149 | AVPacket pkt; 150 | av_init_packet(&pkt); 151 | 152 | // uint8_t* data = new uint8_t[vps_pps_sps_size+datasize]; 153 | 154 | // memcpy(data, vps_pps_sps, vps_pps_sps_size); 155 | // memcpy(data+vps_pps_sps_size, imageData, datasize); 156 | 157 | uint8_t *data = new uint8_t[datasize]; 158 | memcpy(data, imageData, datasize); 159 | 160 | av_packet_from_data(&pkt, data, datasize); 161 | FILE *oFile = fopen("./out_352x288_one_frame.yuv", "wb"); 162 | auto decodecallback = [&](AVCodecContext *ctx, const AVFrame *frame) { 163 | WriteYUV420P10LEToFile(frame, oFile); 164 | printf("解码成功\n"); 165 | }; 166 | decode(avcodecCtx, &pkt, decodecallback); 167 | av_packet_unref(&pkt); 168 | 169 | decode(avcodecCtx, nullptr, decodecallback); 170 | 171 | avcodec_close(avcodecCtx); 172 | 173 | fclose(oFile); 174 | } 175 | 176 | #endif -------------------------------------------------------------------------------- /src/decode_video_output_one_image.h: 
-------------------------------------------------------------------------------- 1 | #ifndef DECODE_VIDEO_OUTPUT_ONE_IMAGE_H_H_H 2 | #define DECODE_VIDEO_OUTPUT_ONE_IMAGE_H_H_H 3 | 4 | #include "global.h" 5 | #include "codecimpl.h" 6 | 7 | static AVFormatContext* inFormatCtx = NULL; 8 | static AVCodecContext* vDecCodecContext = NULL; 9 | AVFilterContext *buffersink_ctx; 10 | AVFilterContext *buffersrc_ctx; 11 | AVFilterGraph *filter_graph; 12 | static int video_stream_index = -1; 13 | 14 | /** 15 | * 生成缩略图,默认第一帧视频帧生成缩略图,宽高为视频宽高 16 | * @inputVideoFile: 输入视频文件 17 | * @outImageFile: 输出图片文件 18 | * @width: 缩略图片宽 19 | * @height: 缩略图片高 20 | * @nframe: 截取第几帧视频生成缩略图 21 | */ 22 | int thumb(const char* inputVideoFile, const char* outImageFile, unsigned int width = 0, unsigned int height = 0, unsigned int nframe = 1); 23 | 24 | 25 | static int init_filters(const char *filters_descr) 26 | { 27 | char args[512]; 28 | int ret = 0; 29 | const AVFilter *buffersrc = avfilter_get_by_name("buffer"); 30 | const AVFilter *buffersink = avfilter_get_by_name("buffersink"); 31 | AVFilterInOut *outputs = avfilter_inout_alloc(); 32 | AVFilterInOut *inputs = avfilter_inout_alloc(); 33 | AVRational time_base = inFormatCtx->streams[video_stream_index]->time_base; 34 | enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; 35 | 36 | filter_graph = avfilter_graph_alloc(); 37 | if (!outputs || !inputs || !filter_graph) { 38 | ret = AVERROR(ENOMEM); 39 | goto end; 40 | } 41 | 42 | /* buffer video source: the decoded frames from the decoder will be inserted here. 
*/ 43 | snprintf(args, sizeof(args), 44 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 45 | vDecCodecContext->width, vDecCodecContext->height, vDecCodecContext->pix_fmt, 46 | time_base.num, time_base.den, 47 | vDecCodecContext->sample_aspect_ratio.num, vDecCodecContext->sample_aspect_ratio.den); 48 | 49 | ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", 50 | args, NULL, filter_graph); 51 | if (ret < 0) { 52 | av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); 53 | goto end; 54 | } 55 | 56 | /* buffer video sink: to terminate the filter chain. */ 57 | ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", 58 | NULL, NULL, filter_graph); 59 | if (ret < 0) { 60 | av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); 61 | goto end; 62 | } 63 | 64 | ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, 65 | AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN); 66 | if (ret < 0) { 67 | av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); 68 | goto end; 69 | } 70 | 71 | /* 72 | * Set the endpoints for the filter graph. The filter_graph will 73 | * be linked to the graph described by filters_descr. 74 | */ 75 | 76 | /* 77 | * The buffer source output must be connected to the input pad of 78 | * the first filter described by filters_descr; since the first 79 | * filter input label is not specified, it is set to "in" by 80 | * default. 81 | */ 82 | outputs->name = av_strdup("in"); 83 | outputs->filter_ctx = buffersrc_ctx; 84 | outputs->pad_idx = 0; 85 | outputs->next = NULL; 86 | 87 | /* 88 | * The buffer sink input must be connected to the output pad of 89 | * the last filter described by filters_descr; since the last 90 | * filter output label is not specified, it is set to "out" by 91 | * default. 
92 | */ 93 | inputs->name = av_strdup("out"); 94 | inputs->filter_ctx = buffersink_ctx; 95 | inputs->pad_idx = 0; 96 | inputs->next = NULL; 97 | 98 | if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, 99 | &inputs, &outputs, NULL)) < 0) 100 | goto end; 101 | 102 | if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) 103 | goto end; 104 | 105 | end: 106 | avfilter_inout_free(&inputs); 107 | avfilter_inout_free(&outputs); 108 | 109 | return ret; 110 | } 111 | 112 | int decode_video_output_one_image_test(){ 113 | const char* inputVideoFile = "./123.mp4"; 114 | const char* outImageFile = "./123.jpeg"; 115 | unsigned int width = 0; 116 | unsigned int height = 0; 117 | unsigned int nframe = 1; 118 | return thumb(inputVideoFile, outImageFile, width, height, nframe); 119 | } 120 | 121 | 122 | int thumb(const char* inputVideoFile, const char* outImageFile, unsigned int width, unsigned int height, unsigned int nframe){ 123 | 124 | AVFormatContext* outFormatCtx = NULL; 125 | AVCodecContext* vEncCodecContext = NULL; 126 | AVCodec* vDecCodec = NULL; 127 | AVCodec* vEncCodec = NULL; 128 | int ret = -1; 129 | int m, n; 130 | AVPacket avpacket; 131 | AVFrame* avframe = NULL; 132 | AVRational timebase; 133 | OnDecodeSuccess decodeCallback; 134 | OnEncodeSuccess encodeCallback; 135 | bool bVideoFilter = false; 136 | if(width > 0 && height > 0){ 137 | bVideoFilter = true; 138 | } 139 | if(avformat_open_input(&inFormatCtx, inputVideoFile, NULL, NULL) < 0){ 140 | return -1; 141 | } 142 | if(avformat_find_stream_info(inFormatCtx, nullptr) < 0){ 143 | goto end; 144 | } 145 | for(int i=0; inb_streams; i++){ 146 | AVStream* stream = inFormatCtx->streams[i]; 147 | if(stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){ 148 | video_stream_index = i; 149 | const AVCodec* pCodec = avcodec_find_decoder(stream->codecpar->codec_id); 150 | vDecCodecContext = avcodec_alloc_context3(pCodec); 151 | if(avcodec_parameters_to_context(vDecCodecContext, stream->codecpar)<0){ 152 | goto 
end; 153 | } 154 | if(avcodec_open2(vDecCodecContext, pCodec, NULL) < 0){ 155 | goto end; 156 | } 157 | timebase = vDecCodecContext->time_base; 158 | if(!bVideoFilter){ 159 | width = stream->codecpar->width; 160 | height = stream->codecpar->height; 161 | } 162 | break; 163 | } 164 | } 165 | if(video_stream_index == -1) 166 | goto end; 167 | if(avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, outImageFile) < 0){ 168 | goto end; 169 | } 170 | avformat_new_stream(outFormatCtx, 0); 171 | 172 | if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE)) { 173 | ret = avio_open(&outFormatCtx->pb, outImageFile, AVIO_FLAG_WRITE); 174 | if (ret < 0) { 175 | fprintf(stderr, "Could not open output file '%s'", outImageFile); 176 | goto end; 177 | } 178 | } 179 | 180 | //init_filters(NULL); 181 | 182 | vEncCodec = avcodec_find_encoder(outFormatCtx->oformat->video_codec); 183 | vEncCodecContext = avcodec_alloc_context3(vEncCodec); 184 | vEncCodecContext->codec_id = vEncCodec->id; 185 | vEncCodecContext->codec_type = AVMEDIA_TYPE_VIDEO; 186 | vEncCodecContext->pix_fmt = AV_PIX_FMT_YUVJ420P; 187 | //vEncCodecContext->color_range = AVCOL_RANGE_MPEG; 188 | vEncCodecContext->width = width; 189 | vEncCodecContext->height = height; 190 | vEncCodecContext->time_base.num = 1; 191 | vEncCodecContext->time_base.den = 25; 192 | if (avcodec_open2(vEncCodecContext, vEncCodec,NULL) < 0){ 193 | goto end; 194 | } 195 | 196 | if(avformat_write_header(outFormatCtx, nullptr) < 0){ 197 | goto end; 198 | } 199 | 200 | //计算从第几个关键帧开始解码,解几帧之后开始保存 201 | //m = nframe / timebase.den; 202 | //n = nframe % timebase.den; 203 | 204 | decodeCallback = [&](AVCodecContext *ctx, const AVFrame* frame){ 205 | //if(n == 0){//定入图片文件 206 | encode(vEncCodecContext, frame, encodeCallback); 207 | //}else{ 208 | // n--; 209 | //} 210 | }; 211 | 212 | encodeCallback = [&](AVCodecContext* ctx, const AVPacket* avpkt){ 213 | AVPacket* pkt = av_packet_clone(avpkt); 214 | av_write_frame(outFormatCtx, pkt); 215 | 
av_packet_unref(pkt); 216 | av_write_trailer(outFormatCtx); 217 | }; 218 | 219 | av_init_packet(&avpacket); 220 | while((ret = av_read_frame(inFormatCtx, &avpacket)) == 0){ 221 | if(avpacket.stream_index != video_stream_index){ 222 | continue; 223 | } 224 | // if(m > 0){ 225 | // if(avpacket.flags & AV_PKT_FLAG_KEY){ 226 | // m--; 227 | // } 228 | // continue; 229 | // } 230 | ret = decode(vDecCodecContext, &avpacket, decodeCallback); 231 | // av_packet_unref(&avpacket); 232 | // if(n <= 0){ 233 | // break; 234 | // } 235 | break; 236 | } 237 | decode(vDecCodecContext, nullptr, decodeCallback); 238 | end: 239 | avformat_close_input(&inFormatCtx); 240 | if(vDecCodecContext){ 241 | avcodec_close(vDecCodecContext); 242 | } 243 | if(vEncCodecContext){ 244 | avcodec_close(vEncCodecContext); 245 | } 246 | if (outFormatCtx && !(outFormatCtx->oformat->flags & AVFMT_NOFILE)){ 247 | avio_closep(&outFormatCtx->pb); 248 | } 249 | return ret; 250 | } 251 | 252 | 253 | #endif -------------------------------------------------------------------------------- /src/decode_video_output_yuv420_test.h: -------------------------------------------------------------------------------- 1 | #ifndef DECODE_VIDEO_OUTPUT_YUV420_TEST_H_H_ 2 | #define DECODE_VIDEO_OUTPUT_YUV420_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "avframe_util.h" 11 | #include "codecimpl.h" 12 | 13 | 14 | int decode_video_output_yuv420_test(){ 15 | const char * inFileName = "./123.mp4"; 16 | AVFormatContext * avformatctx = nullptr; 17 | AVDictionary *options = NULL; 18 | av_dict_set_int(&options, "probesize", 10240L, 0); 19 | av_dict_set_int(&options, "analyzeduration", 100L, 0); 20 | if(avformat_open_input(&avformatctx, inFileName, nullptr, &options)<0){ 21 | return -1; 22 | } 23 | 24 | AVDictionaryEntry* entry = nullptr; 25 | while(entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)){ 26 | printf("key:%s, value:%s\n", entry->key, 
entry->value); 27 | } 28 | //释放AVDictionary 29 | av_dict_free(&options); 30 | 31 | avformat_find_stream_info(avformatctx, nullptr); 32 | AVCodecContext** pAVCodecCtx = (AVCodecContext**)malloc(avformatctx->nb_streams*sizeof(AVCodecContext*)); 33 | int audio_stream_index, video_stream_index; 34 | for(int i=0; inb_streams; i++){ 35 | AVStream* stream = avformatctx->streams[i]; 36 | const AVCodec* pCodec = avcodec_find_decoder(stream->codecpar->codec_id); 37 | pAVCodecCtx[i] = avcodec_alloc_context3(pCodec); 38 | avcodec_parameters_to_context(pAVCodecCtx[i], stream->codecpar); 39 | 40 | av_codec_set_pkt_timebase(pAVCodecCtx[i], stream->time_base); 41 | if(avcodec_open2(pAVCodecCtx[i], pCodec, nullptr)<0){ 42 | return -1; 43 | } 44 | if(pAVCodecCtx[i]->codec_type == AVMEDIA_TYPE_VIDEO){ 45 | video_stream_index = i; 46 | }else if(pAVCodecCtx[i]->codec_type == AVMEDIA_TYPE_AUDIO){ 47 | audio_stream_index = i; 48 | } 49 | } 50 | 51 | FILE* oFile = fopen("./out352x288.yuv", "wb"); 52 | 53 | auto callback = [&](AVCodecContext *ctx, const AVFrame* frame){ 54 | if(ctx->codec_type==AVMEDIA_TYPE_VIDEO){ 55 | AVFrame* avFrame = av_frame_clone(frame); 56 | int num = 1; //分子 57 | int den = avformatctx->streams[video_stream_index]->r_frame_rate.num;//分母 58 | avFrame->pts = av_rescale_q(frame->pts, avformatctx->streams[video_stream_index]->time_base, AVRational{num, den}); 59 | printf("pts:%ld 转成 %ld\n", frame->pts, avFrame->pts); 60 | WriteYUV420ToFile(avFrame, oFile); 61 | }else if(ctx->codec_type==AVMEDIA_TYPE_AUDIO){ 62 | //printf("解码音频\n"); 63 | int t_data_size = av_samples_get_buffer_size( 64 | NULL, frame->channels, 65 | frame->nb_samples, 66 | (AVSampleFormat)frame->format, 67 | 0); 68 | // if(av_sample_fmt_is_planar((AVSampleFormat)frame->format)){ 69 | // uint8_t *buf = (uint8_t *)malloc(t_data_size); 70 | // interleave(inFrame->data, buf, 71 | // inFrame->channels, (AVSampleFormat)inFrame->format, t_data_size); 72 | // }else{ 73 | 74 | // } 75 | } 76 | }; 77 | 78 | 
AVPacket *packet = av_packet_alloc(); 79 | av_init_packet(packet); 80 | while(1){ 81 | int ret = av_read_frame(avformatctx, packet); 82 | if(ret!=0){ 83 | printf("read error or file end\n"); 84 | break; 85 | } 86 | decode(pAVCodecCtx[packet->stream_index], packet, callback); 87 | av_packet_unref(packet); 88 | } 89 | 90 | decode(pAVCodecCtx[video_stream_index], nullptr, callback); 91 | 92 | 93 | for(int i=0; inb_streams; i++){ 94 | avcodec_close(pAVCodecCtx[i]); 95 | } 96 | free(pAVCodecCtx); 97 | 98 | avformat_close_input(&avformatctx); 99 | 100 | av_packet_free(&packet); 101 | 102 | return 0; 103 | } 104 | 105 | #endif -------------------------------------------------------------------------------- /src/encode_frames_yuv420p_output_heif.h: -------------------------------------------------------------------------------- 1 | #ifndef ENCODE_FRAMES_YUV420P_OUTPUT_HEIF_H_H_ 2 | #define ENCODE_FRAMES_YUV420P_OUTPUT_HEIF_H_H_ 3 | #include "global.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "avframe_util.h" 10 | #include "codecimpl.h" 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | using namespace HEIFPP; 26 | 27 | 28 | // static bool parseNalu(const uint8_t* data, uint64_t datalen, HEIF::Array& decoderSpecificInfo, HEIF::Array& hevcData ){ 29 | // NAL_State d; 30 | // d.init_parse(data, datalen); 31 | // int flags = 0; 32 | // for(;;){ 33 | // const std::uint8_t* nal_data = nullptr; 34 | // std::uint64_t nal_len = 0; 35 | // if (!d.parse_byte_stream(nal_data, nal_len)) 36 | // { 37 | // break; 38 | // } 39 | // int type; 40 | // type = (nal_data[0] >> 1) & 0x3f; 41 | // if(( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::PREFIX_SEI_NUT || ( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::SUFFIX_SEI_NUT){ 42 | // continue; 43 | // } 44 | // if(( 
HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_VPS ||( HEIF::DecoderSpecInfoType) type == HEIF::DecoderSpecInfoType::HEVC_SPS || ( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_PPS){ 45 | // std::uint32_t index = 0; 46 | // if (( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_VPS) 47 | // index = 0; 48 | // else if (( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_SPS) 49 | // index = 1; 50 | // else 51 | // index = 2; 52 | // flags |= 1u << index; 53 | // decoderSpecificInfo[index].decSpecInfoType = ( HEIF::DecoderSpecInfoType)type; 54 | // decoderSpecificInfo[index].decSpecInfoData = HEIF::Array(nal_len + 4); 55 | // decoderSpecificInfo[index].decSpecInfoData[0] = decoderSpecificInfo[index].decSpecInfoData[1] = 56 | // decoderSpecificInfo[index].decSpecInfoData[2] = 0; 57 | // decoderSpecificInfo[index].decSpecInfoData[3] = 1; 58 | // std::memcpy(decoderSpecificInfo[index].decSpecInfoData.elements + 4, nal_data, nal_len); 59 | // }else if( type == 16 || type == 17 || type == 18 || 60 | // type == 19 || type == 20 || type == 21 ){ 61 | // hevcData = HEIF::Array(nal_len + 4); 62 | // hevcData[0] = hevcData[1] = hevcData[2] = 0; 63 | // hevcData[3] = 1; 64 | // std::memcpy(hevcData.elements + 4, nal_data, nal_len); 65 | // }else{ 66 | // return false; 67 | // } 68 | 69 | // } 70 | 71 | // if (flags > 0 && flags != 7 ) 72 | // { 73 | // return false; 74 | // } 75 | // return true; 76 | // } 77 | 78 | int encode_yuv420_output_heif2(){ 79 | 80 | const char * oFileName = "./testsequeue.heic"; 81 | FILE* rFile = fopen("./akiyo_cif.yuv","rb"); 82 | if(rFile == NULL) 83 | return -1; 84 | int width = 352; 85 | int height = 288; 86 | 87 | const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H265); 88 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(codec); 89 | 90 | InitVideoAVCodecCtx(pCodecCtx, AV_CODEC_ID_H265, width, height); 91 | //pCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; 92 | 
av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0); 93 | av_opt_set(pCodecCtx->priv_data, "x265-params", "no-info=true", 0); 94 | 95 | int ret = avcodec_open2(pCodecCtx, codec, NULL); 96 | if (ret < 0) { 97 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 98 | exit(1); 99 | } 100 | 101 | HEIF::Array decoderSpecificInfo = HEIF::Array(3); 102 | // HEIF::Array hevcData; 103 | // if(!parseNalu(pCodecCtx->extradata, pCodecCtx->extradata_size, decoderSpecificInfo, hevcData)){ 104 | // return -1; 105 | // } 106 | bool b = false; 107 | std::vector> hevcDatas; 108 | 109 | auto callback = [&](AVCodecContext* ctx,const AVPacket* avpkt){ 110 | HEIF::Array hevcData; 111 | if(!b || (avpkt->flags & AV_PKT_FLAG_KEY)){//avpkt->flags & AV_PKT_FLAG_KEY 每个I帧前都有可以有sps和pps,如果不把它们拆分出来直接保存到videosample中会造成花屏 112 | printf("=======1======\n"); 113 | if(!parseNalu(avpkt->data, avpkt->size, decoderSpecificInfo, hevcData)){ 114 | printf("=======2======\n"); 115 | }else{ 116 | b = true; 117 | } 118 | }else{ 119 | hevcData = HEIF::Array(avpkt->size); 120 | std::memcpy(hevcData.elements, avpkt->data, avpkt->size); 121 | } 122 | hevcDatas.push_back(hevcData); 123 | }; 124 | 125 | AVFrame* inframe = av_frame_alloc(); 126 | int i=0; 127 | while(1){ 128 | if(feof(rFile)) 129 | break; 130 | 131 | inframe->format = AV_PIX_FMT_YUV420P; 132 | inframe->width = width; 133 | inframe->height = height; 134 | av_frame_get_buffer(inframe, 32); 135 | 136 | ReadYUV420FromFile(inframe, rFile);//从yuv文件填充AVFrame 137 | 138 | inframe->pts = i++; 139 | encode(pCodecCtx, inframe, callback); 140 | 141 | av_frame_unref(inframe); 142 | } 143 | 144 | encode(pCodecCtx, nullptr, callback); 145 | 146 | 147 | avcodec_close(pCodecCtx); 148 | av_frame_free(&inframe); 149 | 150 | fclose(rFile); 151 | 152 | Heif *heif = new Heif(); 153 | // heif->setMajorBrand(HEIF::FourCC("mif1")); 154 | heif->setMajorBrand("msf1"); 155 | heif->addCompatibleBrand(HEIF::FourCC("heic")); 156 | 
heif->addCompatibleBrand(HEIF::FourCC("hevc")); 157 | heif->addCompatibleBrand(HEIF::FourCC("mif1")); 158 | heif->addCompatibleBrand(HEIF::FourCC("iso8")); 159 | 160 | DecoderConfig* config = new HEVCDecoderConfiguration(heif); 161 | HEIF::ErrorCode error = config->setConfig(decoderSpecificInfo); 162 | 163 | 164 | VideoTrack* videoTrack = new VideoTrack(heif); 165 | videoTrack->setTimescale(1000); 166 | for (int i = 0; i < hevcDatas.size(); ++i) { 167 | HEIFPP::VideoSample* imageSeqSample = new HEIFPP::VideoSample(heif); 168 | imageSeqSample->setType(HEIF::FourCC("hvc1")); 169 | imageSeqSample->setDecoderConfiguration(config); 170 | imageSeqSample->setItemData(hevcDatas[i].elements, hevcDatas[i].size); 171 | imageSeqSample->setDuration(40); 172 | videoTrack->addSample(imageSeqSample); 173 | } 174 | 175 | HEVCCodedImageItem* imageItem = new HEVCCodedImageItem(heif); 176 | imageItem->setSize(width, height); 177 | Result r = imageItem->setDecoderConfiguration(config); 178 | imageItem->setItemData(hevcDatas[0].elements, hevcDatas[0].size); 179 | heif->setPrimaryItem(imageItem); 180 | 181 | Result saveRet = heif->save(oFileName); 182 | 183 | if(saveRet != Result::OK){ 184 | return -1; 185 | } 186 | return 0; 187 | } 188 | 189 | #endif -------------------------------------------------------------------------------- /src/encode_oneframe_yuv420p10le_output_heif.h: -------------------------------------------------------------------------------- 1 | #ifndef ENCODE_ONEFRAME_YUV420P10LE_OUTPUT_HEIF_H_H_ 2 | #define ENCODE_ONEFRAME_YUV420P10LE_OUTPUT_HEIF_H_H_ 3 | #include "global.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "avframe_util.h" 10 | #include "codecimpl.h" 11 | #include 12 | #include 13 | #include 14 | 15 | using namespace HEIFPP; 16 | 17 | int encode_yuv420p10le_output_heif(){ 18 | 19 | uint8_t *vps_pps_sps = new uint8_t[1024]; 20 | unsigned int par_size = 0; 21 | unsigned char *h265raw; 22 | 23 | uint64_t h265raw_size = 0; 24 | 
25 | 26 | auto callback = [&](AVCodecContext* ctx,const AVPacket* avpkt){ 27 | uint64_t start_pos = 0; 28 | uint64_t end_pos = avpkt->size; 29 | uint64_t nulu_size; 30 | uint8_t* source = avpkt->data; 31 | uint8_t* nulu_data; 32 | int nuluNum = 0; 33 | uint8_t head[4] = {0x00, 0x00, 0x00, 0x01}; 34 | h265raw = new uint8_t[end_pos + 10]; 35 | while((nulu_data = getNulu(source, start_pos, end_pos, nulu_size)) != nullptr){ 36 | nuluNum++; 37 | int nulu_type = (nulu_data[0] & 0x7E)>>1; 38 | // NAL_BLA_W_LP = 16,  39 | // NAL_BLA_W_RADL = 17,  40 | // NAL_BLA_N_LP = 18,  41 | // NAL_IDR_W_RADL = 19,  42 | // NAL_IDR_N_LP = 20,  43 | // NAL_CRA_NUT = 21,  44 | // NAL_VPS = 32,  45 | // NAL_SPS = 33,  46 | // NAL_PPS = 34, 47 | if(nulu_type == 32){ 48 | printf("VPS\n"); 49 | memcpy(vps_pps_sps + par_size, head, 4); 50 | par_size += 4; 51 | memcpy(vps_pps_sps + par_size, nulu_data, nulu_size); 52 | par_size += nulu_size; 53 | }else if(nulu_type == 33){ 54 | printf("SPS\n"); 55 | memcpy(vps_pps_sps + par_size, head, 4); 56 | par_size += 4; 57 | memcpy(vps_pps_sps + par_size, nulu_data, nulu_size); 58 | par_size += nulu_size; 59 | }else if(nulu_type == 34){ 60 | printf("PPS\n"); 61 | memcpy(vps_pps_sps + par_size, head, 4); 62 | par_size += 4; 63 | memcpy(vps_pps_sps + par_size, nulu_data, nulu_size); 64 | par_size += nulu_size; 65 | }else if(nulu_type == 39 || nulu_type == 40){ 66 | printf("SEI\n"); 67 | memcpy(h265raw + h265raw_size, head, 4); 68 | h265raw_size += 4; 69 | memcpy(h265raw + h265raw_size, nulu_data, nulu_size); 70 | h265raw_size += nulu_size; 71 | }else if(nulu_type == 16 || nulu_type == 17 || nulu_type == 18 || 72 | nulu_type == 19 || nulu_type == 20 || nulu_type == 21){ 73 | printf("I Frame, type: %d\n", nulu_type); 74 | memcpy(h265raw + h265raw_size, head, 4); 75 | h265raw_size += 4; 76 | memcpy(h265raw + h265raw_size, nulu_data, nulu_size); 77 | h265raw_size += nulu_size; 78 | }else{ 79 | printf("############%d###########\n", nulu_type); 80 | } 81 | 
delete []nulu_data; 82 | } 83 | }; 84 | 85 | const char * oFileName = "./test2.heif"; 86 | 87 | #if 1 88 | FILE* rFile = fopen("./output.yuv","rb"); 89 | if(rFile == NULL) 90 | return -1; 91 | 92 | 93 | int width = 352; 94 | int height = 288; 95 | 96 | 97 | const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H265); 98 | 99 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(codec); 100 | InitVideoAVCodecCtx(pCodecCtx, AV_CODEC_ID_H265, width, height); 101 | pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P10LE; 102 | 103 | int ret = avcodec_open2(pCodecCtx, codec, NULL); 104 | if (ret < 0) { 105 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 106 | exit(1); 107 | } 108 | 109 | 110 | AVFrame* inframe = av_frame_alloc(); 111 | 112 | inframe->format = AV_PIX_FMT_YUV420P10LE; 113 | inframe->width = width; 114 | inframe->height = height; 115 | av_frame_get_buffer(inframe, 32); 116 | 117 | ReadYUV420P10LEFromFile(inframe, rFile);//从yuv文件填充AVFrame 118 | inframe->pts = 0; 119 | encode(pCodecCtx, inframe, callback); 120 | av_frame_unref(inframe); 121 | 122 | encode(pCodecCtx, nullptr, callback); 123 | 124 | // Heif* readHeif = new Heif(); 125 | // if(readHeif->load("./111.heic") != Result::OK){ 126 | // return -1; 127 | // } 128 | 129 | // HEVCCodedImageItem* primaryItem = (HEVCCodedImageItem*)readHeif->getPrimaryItem(); 130 | // width = primaryItem->width(); 131 | // height = primaryItem->height(); 132 | // DecoderConfig* readerDecoderConfig = primaryItem->getDecoderConfiguration(); 133 | // readerDecoderConfig->getConfig(vps_pps_sps, par_size); 134 | // const unsigned char* imageData = primaryItem->getItemData(); 135 | // uint64_t datasize = primaryItem->getItemDataSize(); 136 | 137 | 138 | // h265raw_size = datasize; 139 | // h265raw = new unsigned char[h265raw_size]; 140 | // memcpy(h265raw, imageData, h265raw_size); 141 | 142 | #else 143 | Heif* readHeif = new Heif(); 144 | if(readHeif->load("./111.heic") != Result::OK){ 145 | return -1; 146 | } 147 | 148 
| HEVCCodedImageItem* primaryItem = (HEVCCodedImageItem*)readHeif->getPrimaryItem(); 149 | int width = primaryItem->width(); 150 | int height = primaryItem->height(); 151 | DecoderConfig* readerDecoderConfig = primaryItem->getDecoderConfiguration(); 152 | uint32_t vps_pps_sps_size; 153 | readerDecoderConfig->getConfig(vps_pps_sps, vps_pps_sps_size); 154 | const unsigned char* imageData = primaryItem->getItemData(); 155 | uint64_t datasize = primaryItem->getItemDataSize(); 156 | 157 | AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H265); 158 | AVCodecContext* avcodecCtx = avcodec_alloc_context3(codec); 159 | if(avcodec_open2(avcodecCtx, codec, nullptr) < 0){ 160 | return -1; 161 | } 162 | 163 | 164 | 165 | 166 | AVPacket pkt; 167 | av_init_packet(&pkt); 168 | 169 | uint8_t* data = new uint8_t[vps_pps_sps_size+datasize]; 170 | 171 | memcpy(data, vps_pps_sps, vps_pps_sps_size); 172 | memcpy(data+vps_pps_sps_size, imageData, datasize); 173 | 174 | av_packet_from_data(&pkt, data, vps_pps_sps_size+datasize); 175 | 176 | AVFrame * inframe; 177 | auto decodecallback = [&](AVCodecContext *ctx, const AVFrame* frame){ 178 | inframe = av_frame_clone(frame); 179 | printf("解码成功\n"); 180 | }; 181 | 182 | decode(avcodecCtx, &pkt, decodecallback); 183 | av_packet_unref(&pkt); 184 | 185 | inframe->pts = 0; 186 | 187 | const AVCodec *encodec = avcodec_find_encoder(AV_CODEC_ID_H265); 188 | 189 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(encodec); 190 | InitVideoAVCodecCtx(pCodecCtx, AV_CODEC_ID_H265, width, height); 191 | pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P10LE; 192 | 193 | int ret = avcodec_open2(pCodecCtx, encodec, NULL); 194 | if (ret < 0) { 195 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 196 | exit(1); 197 | } 198 | 199 | vps_pps_sps = new unsigned char[pCodecCtx->extradata_size]; 200 | par_size = pCodecCtx->extradata_size; 201 | memcpy(vps_pps_sps, pCodecCtx->extradata, pCodecCtx->extradata_size); 202 | 203 | encode(pCodecCtx, inframe, 
callback); 204 | av_frame_unref(inframe); 205 | 206 | encode(pCodecCtx, nullptr, callback); 207 | 208 | #endif 209 | 210 | Heif *heif = new Heif(); 211 | heif->setMajorBrand(HEIF::FourCC("mif1")); 212 | // heif->setMajorBrand(HEIF::FourCC("heix")); 213 | heif->addCompatibleBrand(HEIF::FourCC("heix")); 214 | 215 | 216 | 217 | HEVCDecoderConfiguration* config = new HEVCDecoderConfiguration(heif); 218 | HEIF::ErrorCode error = config->setConfig(vps_pps_sps, par_size); 219 | 220 | HEIF::MediaFormat format = config->getMediaFormat(); 221 | 222 | HEVCCodedImageItem* imageItem = new HEVCCodedImageItem(heif); 223 | imageItem->setSize(width, height); 224 | Result r = imageItem->setDecoderConfiguration(config); 225 | imageItem->setItemData(h265raw, h265raw_size); 226 | 227 | 228 | heif->setPrimaryItem(imageItem); 229 | 230 | Result saveRet = heif->save(oFileName); 231 | 232 | if(saveRet != Result::OK){ 233 | return -1; 234 | } 235 | 236 | return 0; 237 | } 238 | 239 | #endif -------------------------------------------------------------------------------- /src/encode_video_output_10bith265_test.h: -------------------------------------------------------------------------------- 1 | #ifndef ENCODE_VIDEO_OUTPUT_10BITH265_TEST_H_H_ 2 | #define ENCODE_VIDEO_OUTPUT_10BITH265_TEST_H_H_ 3 | #include "global.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "avframe_util.h" 10 | #include "codecimpl.h" 11 | #include "video_filter_tool.h" 12 | 13 | #define NO_VIDEO_FILTER 1 14 | 15 | int encode_video_output_h265_test2(){ 16 | 17 | #if NO_VIDEO_FILTER 18 | const char *inFileName = "./output.yuv"; 19 | #else 20 | const char* inFileName = "./akiyo_cif.yuv"; 21 | #endif 22 | FILE* rFile = fopen(inFileName,"rb"); 23 | if(rFile == NULL) 24 | return -1; 25 | const char * oFileName = "./test2.h265"; 26 | FILE* oFile = fopen(oFileName, "wb"); 27 | if(rFile == NULL) 28 | return -1; 29 | int width = 352; 30 | int height = 288; 31 | 32 | 33 | const AVCodec *codec = 
avcodec_find_encoder(AV_CODEC_ID_H265); 34 | 35 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(codec); 36 | InitVideoAVCodecCtx(pCodecCtx, AV_CODEC_ID_H265, width, height); 37 | pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P10LE; 38 | 39 | int ret = avcodec_open2(pCodecCtx, codec, NULL); 40 | if (ret < 0) { 41 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 42 | exit(1); 43 | } 44 | 45 | #if NO_VIDEO_FILTER 46 | #else 47 | std::shared_ptr videoFilter = std::make_shared(); 48 | AVFilterContext *buffersrcCtx, *buffersinkCtx; 49 | 50 | {//配置filter块 51 | char in_args[512]; 52 | AVPixelFormat pix_fmts = AV_PIX_FMT_YUV420P; 53 | AVRational timebase = { 54 | .num = 1, 55 | .den = 25 56 | }; 57 | snprintf(in_args, sizeof(in_args),"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 58 | width, height, AV_PIX_FMT_YUV420P, timebase.num, timebase.den, 1,1); 59 | 60 | buffersrcCtx = videoFilter->CreateBufferFilter(in_args, "in"); 61 | enum AVPixelFormat dest_pix_fmts[] = { AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_NONE }; 62 | buffersinkCtx = videoFilter->CreateBufferSinkFilter(dest_pix_fmts, "out"); 63 | 64 | AVFilterInOut* inputs = avfilter_inout_alloc(); 65 | AVFilterInOut* outputs = avfilter_inout_alloc(); 66 | 67 | outputs->name = av_strdup("in"); 68 | outputs->filter_ctx = buffersrcCtx; 69 | outputs->pad_idx = 0; 70 | outputs->next = nullptr; 71 | 72 | inputs->name = av_strdup("out"); 73 | inputs->filter_ctx = buffersinkCtx; 74 | inputs->pad_idx = 0; 75 | inputs->next = nullptr; 76 | 77 | //生成gif最关键的filter配置 78 | char filter_desc[1024]; 79 | snprintf(filter_desc, sizeof(filter_desc),"format=pix_fmts=%d", AV_PIX_FMT_YUV420P10LE); 80 | if(videoFilter->InsertFilter(inputs, outputs, filter_desc) < 0){ 81 | av_log(nullptr, AV_LOG_ERROR, "parse filter graph error\n"); 82 | exit(1); 83 | } 84 | } 85 | 86 | if(!videoFilter->FilterConfig()){ 87 | exit(1); 88 | } 89 | #endif 90 | 91 | auto callback = [&](AVCodecContext* ctx,const AVPacket* avpkt){ 92 | 
uint64_t start_pos = 0; 93 | uint64_t end_pos = avpkt->size; 94 | uint64_t nulu_size; 95 | uint8_t* source = avpkt->data; 96 | uint8_t* nulu_data; 97 | int nuluNum = 0; 98 | uint8_t head[4] = {0x00, 0x00, 0x00, 0x01}; 99 | while((nulu_data = getNulu(source, start_pos, end_pos, nulu_size)) != nullptr){ 100 | nuluNum++; 101 | int nulu_type = (nulu_data[0] & 0x7E)>>1; 102 | // NAL_BLA_W_LP = 16,  103 | // NAL_BLA_W_RADL = 17,  104 | // NAL_BLA_N_LP = 18,  105 | // NAL_IDR_W_RADL = 19,  106 | // NAL_IDR_N_LP = 20,  107 | // NAL_CRA_NUT = 21,  108 | // NAL_VPS = 32,  109 | // NAL_SPS = 33,  110 | // NAL_PPS = 34, 111 | if(nulu_type == 32){ 112 | printf("VPS\n"); 113 | }else if(nulu_type == 33){ 114 | printf("SPS\n"); 115 | }else if(nulu_type == 34){ 116 | printf("PPS\n"); 117 | }else if(nulu_type == 39 || nulu_type == 40){ 118 | printf("SEI:%d\n", nulu_type); 119 | }else if(nulu_type == 16 || nulu_type == 17 || nulu_type == 18 || 120 | nulu_type == 19 || nulu_type == 20 || nulu_type == 21){ 121 | printf("I Frame, type: %d\n", nulu_type); 122 | }else{ 123 | printf("############%d###########\n", nulu_type); 124 | } 125 | fwrite(head, 1, 4, oFile); 126 | fwrite(nulu_data, 1, nulu_size, oFile); 127 | delete []nulu_data; 128 | } 129 | printf("-----------------%d-----------\n", avpkt->pts); 130 | }; 131 | 132 | AVFrame* inframe = av_frame_alloc(); 133 | #if NO_VIDEO_FILTER 134 | #else 135 | AVFrame *filt_frame = av_frame_alloc(); 136 | #endif 137 | int i=0; 138 | while(1){ 139 | if(feof(rFile)) 140 | break; 141 | 142 | 143 | #if NO_VIDEO_FILTER 144 | inframe->format = AV_PIX_FMT_YUV420P10LE; 145 | inframe->width = width; 146 | inframe->height = height; 147 | av_frame_get_buffer(inframe, 32); 148 | ReadYUV420P10LEFromFile(inframe, rFile);//从yuv文件填充AVFrame 149 | inframe->pts = i++; 150 | encode(pCodecCtx, inframe, callback); 151 | #else 152 | inframe->format = AV_PIX_FMT_YUV420P; 153 | inframe->width = width; 154 | inframe->height = height; 155 | av_frame_get_buffer(inframe, 
32); 156 | ReadYUV420FromFile(inframe, rFile);//从yuv文件填充AVFrame 157 | videoFilter->AddFrame(buffersrcCtx, inframe); 158 | 159 | while (1) {//从buffersink设备上下文获取视频帧 160 | ret = videoFilter->GetFrame(buffersinkCtx, filt_frame); 161 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 162 | break; 163 | if (ret < 0) 164 | break; 165 | 166 | filt_frame->pts = i++; 167 | encode(pCodecCtx, filt_frame, callback); 168 | 169 | av_frame_unref(filt_frame); 170 | } 171 | #endif 172 | 173 | 174 | av_frame_unref(inframe); 175 | } 176 | #if NO_VIDEO_FILTER 177 | #else 178 | if ((ret = videoFilter->AddFrame(buffersrcCtx, NULL)) >= 0) { 179 | while (1) {//从buffersink设备上下文获取视频帧 180 | ret = videoFilter->GetFrame(buffersinkCtx, filt_frame); 181 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 182 | break; 183 | if (ret < 0) 184 | break; 185 | 186 | filt_frame->pts = i++; 187 | encode(pCodecCtx, filt_frame, callback); 188 | 189 | av_frame_unref(filt_frame); 190 | } 191 | } 192 | #endif 193 | 194 | 195 | encode(pCodecCtx, nullptr, callback); 196 | 197 | avcodec_close(pCodecCtx); 198 | #if NO_VIDEO_FILTER 199 | #else 200 | av_frame_free(&filt_frame); 201 | #endif 202 | av_frame_free(&inframe); 203 | fclose(rFile); 204 | fclose(oFile); 205 | printf("完成输出\n"); 206 | return 0; 207 | } 208 | 209 | #endif -------------------------------------------------------------------------------- /src/encode_video_output_h264_test.h: -------------------------------------------------------------------------------- 1 | #ifndef ENCODE_VIDEO_OUTPUT_H264_TEST_H_H_ 2 | #define ENCODE_VIDEO_OUTPUT_H264_TEST_H_H_ 3 | #include "global.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "avframe_util.h" 10 | #include "codecimpl.h" 11 | 12 | 13 | int encode_video_output_h264_test(){ 14 | 15 | FILE* rFile = fopen("./akiyo_cif.yuv","rb"); 16 | if(rFile == NULL) 17 | return -1; 18 | const char * oFileName = "./test.h264"; 19 | FILE* oFile = fopen(oFileName, "wb"); 20 | if(rFile == NULL) 
21 | return -1; 22 | int width = 352; 23 | int height = 288; 24 | 25 | 26 | const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264); 27 | 28 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(codec); 29 | InitVideoAVCodecCtx(pCodecCtx, AV_CODEC_ID_H264, width, height); 30 | 31 | int ret = avcodec_open2(pCodecCtx, codec, NULL); 32 | if (ret < 0) { 33 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 34 | exit(1); 35 | } 36 | 37 | auto callback = [&](AVCodecContext* ctx,const AVPacket* avpkt){ 38 | 39 | fwrite(avpkt->data,avpkt->size,1,oFile); 40 | 41 | int flags = avpkt->flags; 42 | printf("avpkt->flag:%d, 0x%x%x%x%x%x\n", flags & AV_PKT_FLAG_KEY, avpkt->data[0], avpkt->data[1], avpkt->data[2], avpkt->data[3], avpkt->data[4]); 43 | }; 44 | 45 | AVFrame* inframe = av_frame_alloc(); 46 | int i=0; 47 | while(1){ 48 | if(feof(rFile)) 49 | break; 50 | 51 | inframe->format = AV_PIX_FMT_YUV420P; 52 | inframe->width = width; 53 | inframe->height = height; 54 | av_frame_get_buffer(inframe, 32); 55 | 56 | ReadYUV420FromFile(inframe, rFile);//从yuv文件填充AVFrame 57 | inframe->pts = i++; 58 | encode(pCodecCtx, inframe, callback); 59 | 60 | av_frame_unref(inframe); 61 | } 62 | 63 | encode(pCodecCtx, nullptr, callback); 64 | 65 | 66 | avcodec_close(pCodecCtx); 67 | av_frame_free(&inframe); 68 | 69 | fclose(rFile); 70 | fclose(oFile); 71 | printf("完成输出\n"); 72 | return 0; 73 | } 74 | 75 | #endif -------------------------------------------------------------------------------- /src/encode_video_output_h265_test.h: -------------------------------------------------------------------------------- 1 | #ifndef ENCODE_VIDEO_OUTPUT_H265_TEST_H_H_ 2 | #define ENCODE_VIDEO_OUTPUT_H265_TEST_H_H_ 3 | #include "global.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "avframe_util.h" 10 | #include "codecimpl.h" 11 | 12 | 13 | int encode_video_output_h265_test(){ 14 | 15 | FILE* rFile = fopen("./akiyo_cif.yuv","rb"); 16 | if(rFile == NULL) 17 | 
return -1; 18 | const char * oFileName = "./test1.h265"; 19 | FILE* oFile = fopen(oFileName, "wb"); 20 | if(rFile == NULL) 21 | return -1; 22 | int width = 352; 23 | int height = 288; 24 | 25 | 26 | const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H265); 27 | 28 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(codec); 29 | InitVideoAVCodecCtx(pCodecCtx, AV_CODEC_ID_H265, width, height); 30 | 31 | int ret = avcodec_open2(pCodecCtx, codec, NULL); 32 | if (ret < 0) { 33 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 34 | exit(1); 35 | } 36 | 37 | auto callback = [&](AVCodecContext* ctx,const AVPacket* avpkt){ 38 | 39 | fwrite(avpkt->data,avpkt->size,1,oFile); 40 | 41 | int flags = avpkt->flags; 42 | printf("avpkt->flag:%d, 0x%x%x%x%x%x\n", flags & AV_PKT_FLAG_KEY, avpkt->data[0], avpkt->data[1], avpkt->data[2], avpkt->data[3], avpkt->data[4]); 43 | }; 44 | 45 | AVFrame* inframe = av_frame_alloc(); 46 | int i=0; 47 | while(1){ 48 | if(feof(rFile)) 49 | break; 50 | 51 | inframe->format = AV_PIX_FMT_YUV420P; 52 | inframe->width = width; 53 | inframe->height = height; 54 | av_frame_get_buffer(inframe, 32); 55 | 56 | ReadYUV420FromFile(inframe, rFile);//从yuv文件填充AVFrame 57 | inframe->pts = i++; 58 | encode(pCodecCtx, inframe, callback); 59 | 60 | av_frame_unref(inframe); 61 | } 62 | 63 | encode(pCodecCtx, nullptr, callback); 64 | 65 | 66 | avcodec_close(pCodecCtx); 67 | av_frame_free(&inframe); 68 | 69 | fclose(rFile); 70 | fclose(oFile); 71 | 72 | printf("完成输出\n"); 73 | return 0; 74 | } 75 | 76 | #endif -------------------------------------------------------------------------------- /src/encode_video_output_mp4_test.h: -------------------------------------------------------------------------------- 1 | #ifndef ENCODE_VIDEO_OUTPUT_MP3_TEST_H_H_ 2 | #define ENCODE_VIDEO_OUTPUT_MP3_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "avframe_util.h" 11 | #include "codecimpl.h" 12 | 
13 | int encode_video_output_mp4_test(){ 14 | 15 | FILE* rFile = fopen("./akiyo_cif.yuv","rb"); 16 | if(rFile == NULL) 17 | return -1; 18 | const char * oFileName = "./akiyo_cif.mp4"; 19 | int width = 352; 20 | int height = 288; 21 | 22 | AVFormatContext * avformatctx; 23 | if(avformat_alloc_output_context2(&avformatctx, nullptr, nullptr, oFileName)<0){ 24 | return -1; 25 | } 26 | AVStream* vStream = avformat_new_stream(avformatctx, nullptr); 27 | if(vStream == nullptr){ 28 | return -1; 29 | } 30 | 31 | printf("index:%d, n: %d\n",vStream->index, avformatctx->nb_streams); 32 | 33 | if(avio_open(&avformatctx->pb, oFileName, AVIO_FLAG_READ_WRITE)<0){ 34 | return -1; 35 | } 36 | 37 | const AVCodec *codec = avcodec_find_encoder(avformatctx->oformat->video_codec); 38 | 39 | AVCodecContext* pCodecCtx = avcodec_alloc_context3(codec); 40 | InitVideoAVCodecCtx(pCodecCtx, avformatctx->oformat->video_codec, width, height); 41 | 42 | int ret = avcodec_open2(pCodecCtx, codec, NULL); 43 | if (ret < 0) { 44 | fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret)); 45 | exit(1); 46 | } 47 | avcodec_parameters_from_context(vStream->codecpar, pCodecCtx); 48 | //vStream->time_base = AVRational{1, 25000}; 49 | printf("pCodecCtx->time_base:%d, %d, vStream->time_base:%d, %d\n", 50 | pCodecCtx->time_base.num, pCodecCtx->time_base.den, vStream->time_base.num, vStream->time_base.den); 51 | auto callback = [&](AVCodecContext* ctx,const AVPacket* avpkt){ 52 | 53 | AVPacket* pkt = av_packet_clone(avpkt); 54 | av_packet_rescale_ts(pkt, pCodecCtx->time_base, vStream->time_base); 55 | printf("src: pts:%lld, dts:%lld, dest: pts:%lld, dts:%lld\n", avpkt->pts, avpkt->dts, pkt->pts, pkt->dts); 56 | if(pkt->pts < pkt->dts){ 57 | printf("---------------------------------------------------------------\n"); 58 | } 59 | pkt->stream_index = vStream->index; 60 | av_write_frame(avformatctx, pkt); 61 | av_packet_unref(pkt); 62 | av_packet_free(&pkt); 63 | }; 64 | 65 | 
if(avformat_write_header(avformatctx, nullptr)<0){ 66 | return -1; 67 | } 68 | 69 | printf("after avformat_write_header >>>>pCodecCtx->time_base:%d, %d, vStream->time_base:%d, %d\n", 70 | pCodecCtx->time_base.num, pCodecCtx->time_base.den, vStream->time_base.num, vStream->time_base.den); 71 | 72 | AVFrame* inframe = av_frame_alloc(); 73 | int i=0; 74 | while(1){ 75 | if(feof(rFile)) 76 | break; 77 | 78 | inframe->format = AV_PIX_FMT_YUV420P; 79 | inframe->width = width; 80 | inframe->height = height; 81 | av_frame_get_buffer(inframe, 32); 82 | 83 | ReadYUV420FromFile(inframe, rFile);//从yuv文件填充AVFrame 84 | inframe->pts = i++; 85 | encode(pCodecCtx, inframe, callback); 86 | 87 | av_frame_unref(inframe); 88 | } 89 | 90 | encode(pCodecCtx, nullptr, callback); 91 | 92 | av_write_trailer(avformatctx); 93 | 94 | avcodec_close(pCodecCtx); 95 | av_frame_free(&inframe); 96 | 97 | avio_close(avformatctx->pb); 98 | avformat_free_context(avformatctx); 99 | 100 | fclose(rFile); 101 | printf("完成输出\n"); 102 | return 0; 103 | } 104 | 105 | #endif -------------------------------------------------------------------------------- /src/exec_ffmpeg_test.h: -------------------------------------------------------------------------------- 1 | #ifndef EXEC_FFMPEG_TEST_H_H_ 2 | #define EXEC_FFMPEG_TEST_H_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | int spawn(char *program,char **argv) 12 | { 13 | int childPid = fork(); 14 | if(childPid == -1) 15 | { 16 | fprintf(stderr,"error in fork:%s\n",strerror(errno)); 17 | return -1; 18 | } 19 | 20 | if(childPid != 0) //当在父进程中是,fork返回的是子进程id, 当是子进程时,返回0 21 | { 22 | return childPid; 23 | } 24 | else 25 | { 26 | execvp(program,argv); 27 | fprintf(stderr,"error in exec function:%s\n",strerror(errno)); 28 | abort(); 29 | } 30 | } 31 | 32 | int exec_ffmpeg_test() 33 | { 34 | printf("start progrom\n"); 35 | int childStatus= 0; 36 | //char* cmd = "-ss 5 -t 10 -accurate_seek -i ./半壶纱.mp4 -codec copy 
-avoid_negative_ts 1 ./cut.mp4"; 37 | char *argList[]={"ffmpeg","-ss 5 -t 10 -accurate_seek -i ./半壶纱.mp4 -codec copy -avoid_negative_ts 1 ./cut.mp4",NULL}; 38 | 39 | spawn("/home/caiyu/ffmpeg_install/bin/ffmpeg",argList); 40 | wait(&childStatus); 41 | if(WIFEXITED (childStatus)) 42 | { 43 | printf ("the child process exited normally, with exit code %d\n",WEXITSTATUS (childStatus)); 44 | } 45 | else 46 | { 47 | printf ("the child process exited abnormally\n"); 48 | } 49 | printf("done in main program\n"); 50 | return 0; 51 | } 52 | 53 | #endif -------------------------------------------------------------------------------- /src/ffmpeg_test_main.cpp: -------------------------------------------------------------------------------- 1 | #include "video_avfilter_test.h" 2 | #include "separate_mp4_output_audio_video_mp4.h" 3 | #include "merge_2mp4_output_mp4.h" 4 | #include "exec_ffmpeg_test.h" 5 | #include "encode_video_output_h264_test.h" 6 | #include "encode_video_output_mp4_test.h" 7 | #include "decode_video_output_yuv420_test.h" 8 | #include "decode_h264_test.h" 9 | #include "resample_audio_test.h" 10 | #include "remuxing_test.h" 11 | #include "av_util_dictionary_test.h" 12 | #include "av_util_avclass_avoption_test.h" 13 | #include "decode_video_output_one_image.h" 14 | #include "decode_audio_output_pcm_test.h" 15 | #include "decode_audio_mix_output_pcm.test.h" 16 | #include "audio_filter_aformat_output_pcm.h" 17 | #include "audio_filter_aresample_output_pcm.h" 18 | #include "cut_mp4_test.h" 19 | #include "generate_gif_test.h" 20 | #include "merge_yuv420_test.h" 21 | #include "merge_image_test.h" 22 | #include "encode_video_output_h265_test.h" 23 | #include "encode_video_output_10bith265_test.h" 24 | #include "decode_h265_test.h" 25 | #include "encode_oneframe_yuv420p10le_output_heif.h" 26 | #include "decode_heic_output_yuv420_test.h" 27 | #include "encode_oneframe_yuv420p_output_heif.h" 28 | #include "encode_frames_yuv420p_output_heif.h" 29 | #include 
"yuv_transfer_test.h" 30 | 31 | int main(){ 32 | // decode_h264_test(); 33 | //decode_h265_test(); 34 | // decode_video_output_yuv420_test(); 35 | //encode_video_output_h264_test(); 36 | // encode_video_output_h265_test(); 37 | // encode_video_output_h265_test2(); 38 | // encode_yuv420p10le_output_heif(); 39 | // encode_yuv420_output_heif(); 40 | //encode_yuv420_output_heif2(); 41 | //encode_video_output_mp4_test(); 42 | // exec_ffmpeg_test(); 43 | //merge_2mp4_output_mp4(); 44 | //separate_mp4_output_audio_video_mp4_test(); 45 | // video_avfilter_test(); 46 | //resample_audio_test(); 47 | //remuxing_test(); 48 | //av_dictionary_test(); 49 | //avclass_avoption_test(); 50 | //decode_video_output_one_image_test(); 51 | //decode_audio_output_pcm_test(); 52 | // audio_filter_aformat_test(); 53 | //audio_filter_aresample_test(); 54 | // audio_filter_test(); 55 | // decode_mix_audio_test(); 56 | //cut_media_file_test(); 57 | // create_gif_test(); 58 | //merge_yuv420_test(); 59 | // merge_files_test(); 60 | //decode_heic_output_yuv420(); 61 | yuv_transfer_test(); 62 | return 0; 63 | } -------------------------------------------------------------------------------- /src/generate_gif_test.h: -------------------------------------------------------------------------------- 1 | #ifndef GENERATE_GIF_TEST_H_H_ 2 | #define GENERATE_GIF_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include "codecimpl.h" 6 | #include "video_filter_tool.h" 7 | 8 | /* 9 | @inputFile: 输入文件 10 | @outputFile: 输出文件 11 | @time: 时长 12 | @width: gif宽 13 | @height: gif高 14 | */ 15 | int create_gif(const char* inputFile, const char* outputFile, int time, int width, int height){ 16 | int ret = -1; 17 | AVFormatContext* in_avformat_ctx = nullptr; 18 | AVFormatContext* out_avformat_ctx = nullptr; 19 | //视频进行解码再编码 20 | AVCodecContext *vDecContext = nullptr; 21 | AVCodecContext *vEncContext = nullptr; 22 | int video_stream_index = -1; 23 | AVPacket *packet; 24 | AVFrame *filt_frame; 25 | AVFilterContext* 
buffersrcCtx; 26 | AVFilterContext* buffersinkCtx; 27 | std::shared_ptr videoFilter = nullptr; 28 | AVPixelFormat dest_pix_fmt = AV_PIX_FMT_PAL8; 29 | int dest_fps = 10; 30 | //带goto语言的变量都需要提前定义如下 31 | if(avformat_open_input(&in_avformat_ctx, inputFile, nullptr, nullptr) < 0){ 32 | goto end; 33 | } 34 | video_stream_index = av_find_best_stream(in_avformat_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); 35 | if(video_stream_index == -1){ 36 | goto end; 37 | } 38 | if(avformat_alloc_output_context2(&out_avformat_ctx, nullptr, nullptr, outputFile) < 0){ 39 | goto end; 40 | } 41 | 42 | {//初始化编解码块 43 | AVStream* stream = in_avformat_ctx->streams[video_stream_index]; 44 | AVStream* out = avformat_new_stream(out_avformat_ctx, nullptr); 45 | 46 | const AVCodec* decCodec = avcodec_find_decoder(stream->codecpar->codec_id); 47 | vDecContext = avcodec_alloc_context3(decCodec); 48 | avcodec_parameters_to_context(vDecContext, stream->codecpar); 49 | if(avcodec_open2(vDecContext, decCodec, nullptr)<0){ 50 | goto end; 51 | } 52 | 53 | const AVCodec* encCodec = avcodec_find_encoder(out_avformat_ctx->oformat->video_codec); 54 | vEncContext = avcodec_alloc_context3(encCodec); 55 | 56 | vEncContext->width = width; 57 | vEncContext->height = height; 58 | vEncContext->codec_type = AVMEDIA_TYPE_VIDEO; 59 | vEncContext->pix_fmt = dest_pix_fmt; 60 | vEncContext->time_base = { 1, dest_fps }; 61 | 62 | if(avcodec_open2(vEncContext, encCodec, nullptr)<0){ 63 | goto end; 64 | } 65 | avcodec_parameters_from_context(out->codecpar, vEncContext); 66 | } 67 | 68 | if (!(out_avformat_ctx->oformat->flags & AVFMT_NOFILE)) { 69 | ret = avio_open(&out_avformat_ctx->pb, outputFile, AVIO_FLAG_WRITE); 70 | if (ret < 0) { 71 | fprintf(stderr, "Could not open output file '%s'", outputFile); 72 | goto end; 73 | } 74 | } 75 | 76 | if(avformat_write_header(out_avformat_ctx, nullptr) < 0){ 77 | goto end; 78 | } 79 | 80 | videoFilter = std::make_shared(); 81 | 82 | {//配置filter块 83 | char in_args[512]; 84 | 
AVPixelFormat pix_fmts = AV_PIX_FMT_YUV420P; 85 | AVRational timebase = in_avformat_ctx->streams[video_stream_index]->time_base; 86 | snprintf(in_args, sizeof(in_args),"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 87 | vDecContext->width, vDecContext->height, AV_PIX_FMT_YUV420P, timebase.num, timebase.den, vDecContext->sample_aspect_ratio.num, vDecContext->sample_aspect_ratio.den); 88 | 89 | buffersrcCtx = videoFilter->CreateBufferFilter(in_args, "in"); 90 | enum AVPixelFormat dest_pix_fmts[] = { dest_pix_fmt, AV_PIX_FMT_NONE }; 91 | buffersinkCtx = videoFilter->CreateBufferSinkFilter(dest_pix_fmts, "out"); 92 | 93 | AVFilterInOut* inputs = avfilter_inout_alloc(); 94 | AVFilterInOut* outputs = avfilter_inout_alloc(); 95 | 96 | outputs->name = av_strdup("in"); 97 | outputs->filter_ctx = buffersrcCtx; 98 | outputs->pad_idx = 0; 99 | outputs->next = nullptr; 100 | 101 | inputs->name = av_strdup("out"); 102 | inputs->filter_ctx = buffersinkCtx; 103 | inputs->pad_idx = 0; 104 | inputs->next = nullptr; 105 | 106 | //生成gif最关键的filter配置 107 | char filter_desc[1024]; 108 | snprintf(filter_desc, sizeof(filter_desc),"format=pix_fmts=rgb32,fps=%d,scale=%d:%d:flags=lanczos,split [o1] [o2];[o1] palettegen [p]; [o2] fifo [o3];[o3] [p] paletteuse" 109 | ,dest_fps, width, height); 110 | if(videoFilter->InsertFilter(inputs, outputs, filter_desc) < 0){ 111 | av_log(nullptr, AV_LOG_ERROR, "parse filter graph error\n"); 112 | goto end; 113 | } 114 | } 115 | 116 | if(!videoFilter->FilterConfig()){ 117 | goto end; 118 | } 119 | 120 | packet = av_packet_alloc(); 121 | av_init_packet(packet); 122 | filt_frame = av_frame_alloc(); 123 | while(1){ 124 | int ret = av_read_frame(in_avformat_ctx, packet); 125 | if(ret!=0){ 126 | printf("read error or file end\n"); 127 | break; 128 | } 129 | if(packet->stream_index == video_stream_index){ 130 | if (av_q2d(in_avformat_ctx->streams[video_stream_index]->time_base) * packet->pts > time) { 131 | av_packet_unref(packet); 132 | 
break;; 133 | } 134 | decode(vDecContext, packet, [&](AVCodecContext* ctx, const AVFrame* frame){ 135 | //对frame进行filter处理 136 | AVFrame* f = av_frame_clone(frame); 137 | // processing one frame 138 | f->pts = frame->best_effort_timestamp; 139 | videoFilter->AddFrame(buffersrcCtx, f); 140 | av_frame_free(&f); 141 | }); 142 | } 143 | 144 | av_packet_unref(packet); 145 | } 146 | 147 | 148 | if ((ret = videoFilter->AddFrame(buffersrcCtx, NULL)) >= 0) { 149 | while (1) {//从buffersink设备上下文获取视频帧 150 | ret = videoFilter->GetFrame(buffersinkCtx, filt_frame); 151 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 152 | break; 153 | if (ret < 0) 154 | break; 155 | 156 | //再次gif编码 157 | encode(vEncContext, filt_frame, [&](AVCodecContext* ctx, const AVPacket* avpkt){ 158 | //写入gif文件 159 | AVPacket* pkt = av_packet_clone(avpkt); 160 | av_interleaved_write_frame(out_avformat_ctx, pkt); 161 | av_packet_free(&pkt); 162 | }); 163 | 164 | av_frame_unref(filt_frame); 165 | } 166 | } 167 | 168 | av_packet_free(&packet); 169 | av_frame_free(&filt_frame); 170 | av_write_trailer(out_avformat_ctx); 171 | ret = 0; 172 | 173 | end: 174 | avformat_close_input(&in_avformat_ctx); 175 | if(vDecContext){ 176 | avcodec_close(vDecContext); 177 | } 178 | if(vEncContext){ 179 | avcodec_close(vEncContext); 180 | } 181 | if (out_avformat_ctx && !(out_avformat_ctx->oformat->flags & AVFMT_NOFILE)){ 182 | avio_closep(&out_avformat_ctx->pb); 183 | } 184 | avformat_free_context(out_avformat_ctx); 185 | 186 | return ret; 187 | } 188 | 189 | 190 | int create_gif_test(){ 191 | const char* inputfile = "./wd_091_tempMovie 6.mov"; 192 | const char* outputfile = "./gen.gif"; 193 | int time = 15; 194 | int width = 128; 195 | int height = 128; 196 | return create_gif(inputfile, outputfile, time, width, height); 197 | } 198 | 199 | #endif -------------------------------------------------------------------------------- /src/global.h: -------------------------------------------------------------------------------- 1 
| 2 | #ifndef GLOBAL_H_H_ 3 | #define GLOBAL_H_H_ 4 | 5 | extern "C"{ 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | } 18 | 19 | char av_error[AV_ERROR_MAX_STRING_SIZE] = { 0 }; 20 | #define av_err2str(errnum) \ 21 | av_make_error_string(av_error, AV_ERROR_MAX_STRING_SIZE, errnum) 22 | 23 | 24 | void InitVideoAVCodecCtx(AVCodecContext* c, AVCodecID codecId, int width, int height){ 25 | c->codec_id = codecId; 26 | c->codec_type = AVMEDIA_TYPE_VIDEO; 27 | // /* put sample parameters */ 28 | c->bit_rate = 400000; 29 | /* resolution must be a multiple of two */ 30 | c->width = width; 31 | c->height = height; 32 | /* frames per second */ 33 | c->time_base = (AVRational){1, 25}; 34 | c->framerate = (AVRational){25, 1}; 35 | 36 | c->gop_size = 12; 37 | c->max_b_frames = 2; 38 | c->pix_fmt = AV_PIX_FMT_YUV420P; 39 | c->qmin = 10; 40 | c->qmax = 51; 41 | //如果设置了这个参数,只会有一次pps和sps输出 42 | //c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; 43 | if (c->codec_id == AV_CODEC_ID_H264) 44 | av_opt_set(c->priv_data, "preset", "slow", 0); 45 | } 46 | 47 | int InitABufferFilter(AVFilterGraph* filterGraph, AVFilterContext** filterctx, const char* name, 48 | AVRational timebase, int samplerate, AVSampleFormat format, uint64_t channel_layout){ 49 | const AVFilter* bufferfilter = avfilter_get_by_name("abuffer"); 50 | *filterctx = NULL; 51 | char in_args[512]; 52 | snprintf(in_args, sizeof(in_args), 53 | "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRId64, 54 | timebase.num, timebase.den, samplerate, 55 | av_get_sample_fmt_name(format), 56 | channel_layout); 57 | return avfilter_graph_create_filter(filterctx, bufferfilter, name, in_args, NULL, filterGraph); 58 | } 59 | 60 | int InitABufferSinkFilter(AVFilterGraph* filterGraph, AVFilterContext** filterctx, const char* name, 61 | AVSampleFormat format, int samplerate, uint64_t channel_layout){ 62 | const AVFilter* 
buffersinkfilter = avfilter_get_by_name("abuffersink"); 63 | 64 | AVSampleFormat out_sample_fmts[2]; 65 | out_sample_fmts[0]= format; 66 | out_sample_fmts[1] = AV_SAMPLE_FMT_NONE; 67 | 68 | int64_t out_channel_layouts[2]; 69 | out_channel_layouts[0] = channel_layout; 70 | out_channel_layouts[1] = -1; 71 | 72 | int out_sample_rates[2]; 73 | out_sample_rates[0] = samplerate; 74 | out_sample_rates[1] = -1; 75 | 76 | int ret = avfilter_graph_create_filter(filterctx, buffersinkfilter, name, NULL, NULL, filterGraph); 77 | if (ret < 0) { 78 | av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); 79 | 80 | } 81 | do{ 82 | ret = av_opt_set_int_list(*filterctx, "sample_fmts", out_sample_fmts, -1, 83 | AV_OPT_SEARCH_CHILDREN); 84 | if (ret < 0) { 85 | av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n"); 86 | break; 87 | } 88 | ret = av_opt_set_int_list(*filterctx, "channel_layouts", out_channel_layouts, -1, 89 | AV_OPT_SEARCH_CHILDREN); 90 | if (ret < 0) { 91 | av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); 92 | break; 93 | } 94 | ret = av_opt_set_int_list(*filterctx, "sample_rates", out_sample_rates, -1, 95 | AV_OPT_SEARCH_CHILDREN); 96 | if (ret < 0) { 97 | av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n"); 98 | break; 99 | } 100 | }while(0); 101 | 102 | return ret; 103 | } 104 | 105 | 106 | uint8_t* getNulu(const uint8_t* source, uint64_t& start_pos, uint64_t end_pos, uint64_t& nulu_size){ 107 | nulu_size = 0; 108 | uint64_t pos = start_pos; 109 | start_pos = 0; 110 | while(pos < end_pos - 4){ 111 | if(source[pos]== 0x00 && source[pos+1] == 0x00){ 112 | if(source[pos+2] == 0x01){ 113 | start_pos = pos + 3; 114 | }else if(source[pos+2] == 0x00 && source[pos+3] == 0x01){ 115 | start_pos = pos + 4; 116 | } 117 | } 118 | if(start_pos != 0){ 119 | break; 120 | } 121 | pos += 1; 122 | } 123 | uint8_t* nulu = nullptr; 124 | if(start_pos != 0){ 125 | pos = start_pos; 126 | while(pos < end_pos - 4){ 127 | if(source[pos]== 0x00 
&& source[pos+1] == 0x00){ 128 | if(source[pos+2] == 0x01){ 129 | break; 130 | }else if(source[pos+2] == 0x00 && source[pos+3] == 0x01){ 131 | break; 132 | } 133 | } 134 | pos += 1; 135 | nulu_size += 1; 136 | } 137 | if(pos >= end_pos - 4){//last nulu 138 | nulu_size += end_pos - pos; 139 | } 140 | nulu = new uint8_t[nulu_size]; 141 | memcpy(nulu, source + start_pos, nulu_size); 142 | start_pos += nulu_size; 143 | } 144 | return nulu; 145 | } 146 | 147 | #endif -------------------------------------------------------------------------------- /src/merge_2mp4_output_mp4.h: -------------------------------------------------------------------------------- 1 | #ifndef MERGE_2MP4_OUTPUT_MP4_H_H_ 2 | #define MERGE_2MP4_OUTPUT_MP4_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | struct _MediaInfo{ 13 | AVFormatContext* inAVFormatCtx; 14 | AVStream* outStream; 15 | int stream_index; 16 | }; 17 | 18 | std::shared_ptr<_MediaInfo> InitStream(AVFormatContext* outFormatCtx, const char* filepath, AVMediaType type){ 19 | AVFormatContext* ofctx = nullptr; 20 | if(avformat_open_input(&ofctx, filepath, nullptr, nullptr)<0){ 21 | return nullptr; 22 | } 23 | avformat_find_stream_info(ofctx, nullptr); 24 | int stream_index = -1; 25 | AVStream* avStream = avformat_new_stream(outFormatCtx, nullptr); 26 | for(int i=0; inb_streams; i++){ 27 | AVStream* stream = ofctx->streams[i]; 28 | if(stream->codecpar->codec_type == type){ 29 | avcodec_parameters_copy(avStream->codecpar, stream->codecpar); 30 | stream_index = i; 31 | break; 32 | } 33 | } 34 | auto ptr = std::make_shared<_MediaInfo>(); 35 | ptr->stream_index = stream_index; 36 | ptr->inAVFormatCtx = ofctx; 37 | ptr->outStream = avStream; 38 | return ptr; 39 | } 40 | 41 | bool ReadPacket(std::shared_ptr<_MediaInfo> mediaInfo, AVPacket* pkt){ 42 | bool readpkt = false; 43 | while(true){//直到读取到视频数据为止 44 | if(av_read_frame(mediaInfo->inAVFormatCtx, pkt)<0){ 45 | break; 46 
| } 47 | if(pkt->stream_index == mediaInfo->stream_index){ 48 | readpkt = true; 49 | break; 50 | }else{ 51 | av_packet_unref(pkt); 52 | } 53 | } 54 | return readpkt; 55 | } 56 | 57 | //合成mp4文件,取其中一个文件的音频和另一个文件的视频合成新的mp4文件 58 | int merge_2mp4_output_mp4(){ 59 | const char* audioInFileName = "./半壶纱.mp4"; 60 | const char* videoInFileName = "./123.mp4"; 61 | const char* outputFileName = "./merge_output.mp4"; 62 | 63 | AVFormatContext* outFormatCtx; 64 | if(avformat_alloc_output_context2(&outFormatCtx, nullptr, nullptr, outputFileName)){ 65 | return -1; 66 | } 67 | 68 | std::shared_ptr<_MediaInfo> audioMediaInfo = InitStream(outFormatCtx, audioInFileName, AVMEDIA_TYPE_AUDIO); 69 | std::shared_ptr<_MediaInfo> videoMediaInfo = InitStream(outFormatCtx, videoInFileName, AVMEDIA_TYPE_VIDEO); 70 | 71 | if(avio_open(&outFormatCtx->pb, outputFileName, AVIO_FLAG_WRITE)<0){ 72 | return -1; 73 | } 74 | 75 | avformat_write_header(outFormatCtx, nullptr); 76 | 77 | AVPacket* pkt = av_packet_alloc(); 78 | av_init_packet(pkt); 79 | 80 | int64_t last_video_pts = 0; 81 | int64_t last_audio_pts = 0; 82 | 83 | AVStream* inAudioSt = audioMediaInfo->inAVFormatCtx->streams[audioMediaInfo->stream_index]; 84 | AVStream* inVideoSt = videoMediaInfo->inAVFormatCtx->streams[videoMediaInfo->stream_index]; 85 | while(1){ 86 | AVStream* in = nullptr; 87 | AVStream* out = nullptr; 88 | if(av_compare_ts(last_video_pts, inVideoSt->time_base, last_audio_pts, inAudioSt->time_base)<=0){//先读取视频 89 | if(!ReadPacket(videoMediaInfo, pkt)){ 90 | printf("视频文件读取结束\n"); 91 | break; 92 | } 93 | last_video_pts = pkt->pts; 94 | out = videoMediaInfo->outStream; 95 | in = inVideoSt; 96 | }else{//先读取音频 97 | if(!ReadPacket(audioMediaInfo, pkt)){ 98 | printf("音频文件结束\n"); 99 | break; 100 | } 101 | last_audio_pts = pkt->pts; 102 | out = audioMediaInfo->outStream; 103 | in = inAudioSt; 104 | } 105 | int64_t pts = pkt->pts; 106 | int64_t dts = pkt->dts; 107 | int64_t dur = pkt->duration; 108 | pkt->pts = 
av_rescale_q_rnd(pkt->pts, in->time_base, out->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 109 | pkt->dts = av_rescale_q_rnd(pkt->dts, in->time_base, out->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 110 | pkt->duration = av_rescale_q(pkt->duration, in->time_base, out->time_base); 111 | pkt->pos = -1; 112 | //printf("old_pts:%d, pkt->pts:%ld, old_dts:%ld, pkt->dts:%ld, old_duration:%ld, pkt->duration:%ld\n", pts, pkt->pts, dts, pkt->dts, dur, pkt->duration); 113 | pkt->stream_index = out->index; 114 | av_interleaved_write_frame(outFormatCtx, pkt); 115 | av_packet_unref(pkt); 116 | } 117 | 118 | 119 | av_write_trailer(outFormatCtx); 120 | avio_close(outFormatCtx->pb); 121 | avformat_free_context(outFormatCtx); 122 | 123 | av_packet_free(&pkt); 124 | 125 | avformat_close_input(&audioMediaInfo->inAVFormatCtx); 126 | avformat_close_input(&videoMediaInfo->inAVFormatCtx); 127 | 128 | return 0; 129 | } 130 | 131 | #endif -------------------------------------------------------------------------------- /src/merge_image_test.h: -------------------------------------------------------------------------------- 1 | #ifndef MERGE_IMAGE_TEST_H_H_ 2 | #define MERGE_IMAGE_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include "avframe_util.h" 8 | #include "codecimpl.h" 9 | 10 | static AVFrame* GetFrameFromImageFile(const char* inputfile){ 11 | AVFormatContext* inFormatCtx = NULL; 12 | AVCodecContext* vDecCodecContext = NULL; 13 | AVCodec* vDecCodec = NULL; 14 | int video_stream_index = -1; 15 | int ret = -1; 16 | AVPacket avpacket; 17 | AVFrame* outFrame = NULL; 18 | if(avformat_open_input(&inFormatCtx, inputfile, NULL, NULL) < 0){ 19 | return NULL; 20 | } 21 | video_stream_index = av_find_best_stream(inFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); 22 | if(video_stream_index == -1){ 23 | goto end; 24 | } 25 | { 26 | AVStream* stream = inFormatCtx->streams[video_stream_index]; 27 | 28 | const AVCodec* decCodec = 
avcodec_find_decoder(stream->codecpar->codec_id); 29 | vDecCodecContext = avcodec_alloc_context3(decCodec); 30 | avcodec_parameters_to_context(vDecCodecContext, stream->codecpar); 31 | if(avcodec_open2(vDecCodecContext, decCodec, nullptr)<0){ 32 | goto end; 33 | } 34 | } 35 | av_init_packet(&avpacket); 36 | while((ret = av_read_frame(inFormatCtx, &avpacket)) == 0){ 37 | if(avpacket.stream_index != video_stream_index){ 38 | continue; 39 | } 40 | decode(vDecCodecContext, &avpacket, [&](AVCodecContext *ctx, const AVFrame* frame){ 41 | outFrame = av_frame_clone(frame); 42 | }); 43 | break; 44 | } 45 | end: 46 | avformat_close_input(&inFormatCtx); 47 | if(vDecCodecContext){ 48 | avcodec_close(vDecCodecContext); 49 | } 50 | return outFrame; 51 | } 52 | 53 | static int WriteFrameToFile(const char* outputfile, AVFrame* merge){ 54 | AVFormatContext* outFormatCtx = NULL; 55 | AVCodecContext* vEncCodecContext = NULL; 56 | int ret = -1; 57 | if(avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, outputfile) < 0){ 58 | goto end; 59 | } 60 | avformat_new_stream(outFormatCtx, 0); 61 | 62 | if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE)) { 63 | ret = avio_open(&outFormatCtx->pb, outputfile, AVIO_FLAG_WRITE); 64 | if (ret < 0) { 65 | fprintf(stderr, "Could not open output file '%s'", outputfile); 66 | goto end; 67 | } 68 | } 69 | {//初始化编码器 70 | const AVCodec* vEncCodec = avcodec_find_encoder(outFormatCtx->oformat->video_codec); 71 | vEncCodecContext = avcodec_alloc_context3(vEncCodec); 72 | vEncCodecContext->codec_id = vEncCodec->id; 73 | vEncCodecContext->codec_type = AVMEDIA_TYPE_VIDEO; 74 | vEncCodecContext->pix_fmt = AV_PIX_FMT_YUVJ420P; 75 | vEncCodecContext->width = merge->width; 76 | vEncCodecContext->height = merge->height; 77 | vEncCodecContext->time_base.num = 1; 78 | vEncCodecContext->time_base.den = 25; 79 | if (avcodec_open2(vEncCodecContext, vEncCodec,NULL) < 0){ 80 | goto end; 81 | } 82 | } 83 | 84 | if(avformat_write_header(outFormatCtx, nullptr) < 0){ 85 
| goto end; 86 | } 87 | merge->pts = 0; 88 | encode(vEncCodecContext, merge, [&](AVCodecContext* ctx, const AVPacket* avpkt){ 89 | AVPacket* pkt = av_packet_clone(avpkt); 90 | av_write_frame(outFormatCtx, pkt); 91 | av_packet_unref(pkt); 92 | }); 93 | 94 | ret = av_write_trailer(outFormatCtx); 95 | 96 | end: 97 | if(vEncCodecContext){ 98 | avcodec_close(vEncCodecContext); 99 | } 100 | if (outFormatCtx && !(outFormatCtx->oformat->flags & AVFMT_NOFILE)){ 101 | avio_closep(&outFormatCtx->pb); 102 | } 103 | return ret; 104 | } 105 | 106 | int VerticalMergeImageFiles(std::vector imageFiles, const char* outputfile){ 107 | AVFrame* up = GetFrameFromImageFile(imageFiles[0].c_str()); 108 | AVFrame* down = NULL; 109 | int ret = -1; 110 | for(int i=1; i images; 138 | for(int i=1; i<=3; i++){ 139 | std::string path = std::string("./images/") + std::to_string(i)+std::string(".jpeg"); 140 | images.push_back(path); 141 | } 142 | const char* outfile = "merge.jpeg"; 143 | VerticalMergeImageFiles(images, outfile); 144 | } 145 | 146 | #endif -------------------------------------------------------------------------------- /src/merge_yuv420_test.h: -------------------------------------------------------------------------------- 1 | #ifndef MERGE_YUV420_TEST_H_H_ 2 | #define MERGE_YUV420_TEST_H_H_ 3 | 4 | #include 5 | #include "avframe_util.h" 6 | #include "global.h" 7 | 8 | void merge_yuv420_test(){ 9 | FILE* rFile = fopen("./akiyo_cif.yuv","rb"); 10 | if(rFile == NULL) 11 | return; 12 | const char* oFileName1 = "./akiyo_cif_horizontal.yuv"; 13 | const char* oFileName2 = "./akiyo_cif_vertical.yuv"; 14 | int width = 352; 15 | int height = 288; 16 | FILE* oFile1 = fopen(oFileName1, "wb"); 17 | FILE* oFile2 = fopen(oFileName2, "wb"); 18 | 19 | AVFrame* frame1 = av_frame_alloc(); 20 | AVFrame* frame2 = av_frame_alloc(); 21 | int i=0; 22 | while(1){ 23 | if(feof(rFile)) 24 | break; 25 | 26 | frame1->format = AV_PIX_FMT_YUV420P; 27 | frame1->width = width; 28 | frame1->height = height; 29 | 
av_frame_get_buffer(frame1, 32); 30 | 31 | ReadYUV420FromFile(frame1, rFile);//从yuv文件填充AVFrame 32 | 33 | frame2->format = AV_PIX_FMT_YUV420P; 34 | frame2->width = width; 35 | frame2->height = height; 36 | av_frame_get_buffer(frame2, 32); 37 | 38 | ReadYUV420FromFile(frame2, rFile);//从yuv文件填充AVFrame 39 | 40 | AVFrame* mergeHorizonal = YUV420HorizontalMerge(frame1, frame2); 41 | AVFrame* mergeVertical = YUV420VerticalMerge(frame1, frame2); 42 | 43 | WriteYUV420ToFile(mergeHorizonal, oFile1); 44 | WriteYUV420ToFile(mergeVertical, oFile2); 45 | 46 | 47 | av_frame_unref(frame1); 48 | av_frame_unref(frame2); 49 | 50 | av_frame_free(&mergeHorizonal); 51 | av_frame_free(&mergeVertical); 52 | } 53 | 54 | fclose(rFile); 55 | fclose(oFile1); 56 | fclose(oFile2); 57 | } 58 | 59 | 60 | #endif -------------------------------------------------------------------------------- /src/pw_truecut_hdr.h: -------------------------------------------------------------------------------- 1 | #ifndef PW_TRUECUT_HDR_H_H_ 2 | #define PW_TRUECUT_HDR_H_H_ 3 | 4 | #define IMAGE_CSP_NONE -1 5 | #define IMAGE_CSP_NV12 0 // 10bits是高位10bit, 其它都是低位10bit 6 | #define IMAGE_CSP_I420 1 7 | #define IMAGE_CSP_YV12 2 8 | #define IMAGE_CSP_YUV422 10 9 | #define IMAGE_CSP_YUV444F 12 //planar 10 | #define IMAGE_CSP_V210 13 //UYVY bit zip 11 | 12 | typedef struct MediaInfo 13 | { 14 | int iformat; //input format 15 | int oformat; //output format 16 | int w; 17 | int h; 18 | int in_depth; // 8bits or 10bits 19 | int out_depth; // 8bits or 10bits 20 | int in_linesize; 21 | int out_linesize; 22 | int left, top; // 注:格式变换可能会添加黑边,黑边可能会影响图像处理效果。 23 | int in_vlinesize; // 有些硬解有高度对齐的yuv数据 24 | int out_vlinesize; 25 | }MediaInfo; 26 | 27 | 28 | typedef struct PWTCHDRMetadata 29 | { 30 | }PWTCHDRMetadata; 31 | 32 | typedef struct PWTCHDRHandle PWTCHDRHandle; 33 | 34 | #ifdef __cplusplus 35 | extern "C"{ 36 | #endif 37 | 38 | //初始化函数 39 | PWTCHDRHandle* pw_truecut_hdr_init(MediaInfo mi, void* config, int mode, int 
gpu_core_Id, char *key); 40 | //获取metadata 41 | PWTCHDRMetadata* get_pw_truecut_metadata(PWTCHDRHandle* handle); 42 | //hdr处理函数 43 | void pw_truecut_hdr_process(void* in, void* out, PWTCHDRHandle* handle); 44 | //释放函数 45 | void pw_truecut_hdr_uninit(PWTCHDRHandle* handle); 46 | 47 | #ifdef __cplusplus 48 | } 49 | #endif 50 | 51 | #endif 52 | -------------------------------------------------------------------------------- /src/remuxing_test.h: -------------------------------------------------------------------------------- 1 | #ifndef REMUXING_TEST_H_H_ 2 | #define REMUXING_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | 7 | struct StreamInfo{ 8 | AVStream* in; 9 | AVStream* out; 10 | }; 11 | //本例子是将mp4容器的音视频文件原码转换成flv格式的音视频 12 | int remuxing_test(){ 13 | const char* inFileName = "./半壶纱.mp4"; 14 | //const char* inFileName = "./akiyo_cif.mp4"; 15 | const char* outFileName = "./半壶纱.flv"; 16 | int ret = -1; 17 | AVFormatContext* in_avformat_ctx = nullptr; 18 | AVFormatContext* out_avformat_ctx = nullptr; 19 | std::map infoMap; 20 | AVPacket * pkt; 21 | //带goto语言的变量都需要提前定义如下 22 | if(avformat_open_input(&in_avformat_ctx, inFileName, nullptr, nullptr) < 0){ 23 | goto end; 24 | } 25 | if(avformat_find_stream_info(in_avformat_ctx, nullptr)){ 26 | goto end; 27 | } 28 | if(avformat_alloc_output_context2(&out_avformat_ctx, nullptr, nullptr, outFileName) < 0){ 29 | goto end; 30 | } 31 | 32 | for(int i=0; inb_streams; i++){ 33 | AVStream* stream = in_avformat_ctx->streams[i]; 34 | if(stream->codecpar->codec_type != AVMEDIA_TYPE_AUDIO && 35 | stream->codecpar->codec_type != AVMEDIA_TYPE_VIDEO && 36 | stream->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE){ 37 | continue; 38 | } 39 | AVStream* out = avformat_new_stream(out_avformat_ctx, nullptr); 40 | avcodec_parameters_copy(out->codecpar, stream->codecpar); 41 | //下边这个参数很重要,如果不重置这个参数会出现类似于Tag avc1 incompatible with output codec id '27' ([7][0][0][0])这个的错误 42 | out->codecpar->codec_tag = 0; 43 | infoMap.insert(std::pair(i, 
StreamInfo{stream,out})); 44 | } 45 | 46 | for(auto &iter: infoMap){ 47 | StreamInfo streamInfo = iter.second; 48 | printf("in:分母%d,分子:%d, out:分母%d,分子:%d\n", 49 | streamInfo.in->time_base.den, streamInfo.in->time_base.num, streamInfo.out->time_base.den, streamInfo.out->time_base.num); 50 | } 51 | 52 | 53 | if (!(out_avformat_ctx->oformat->flags & AVFMT_NOFILE)) { 54 | ret = avio_open(&out_avformat_ctx->pb, outFileName, AVIO_FLAG_WRITE); 55 | if (ret < 0) { 56 | fprintf(stderr, "Could not open output file '%s'", outFileName); 57 | goto end; 58 | } 59 | } 60 | 61 | if(avformat_write_header(out_avformat_ctx, nullptr) < 0){ 62 | goto end; 63 | } 64 | 65 | for(auto &iter: infoMap){ 66 | StreamInfo streamInfo = iter.second; 67 | printf("in:分母%d,分子:%d, out:分母%d,分子:%d\n", 68 | streamInfo.in->time_base.den, streamInfo.in->time_base.num, streamInfo.out->time_base.den, streamInfo.out->time_base.num); 69 | } 70 | 71 | 72 | pkt = av_packet_alloc(); 73 | while(1){ 74 | ret = av_read_frame(in_avformat_ctx, pkt); 75 | if(ret!=0){ 76 | printf("read error or file end\n"); 77 | break; 78 | } 79 | auto iter = infoMap.find(pkt->stream_index); 80 | if(iter == infoMap.end()){ 81 | av_packet_unref(pkt); 82 | continue; 83 | } 84 | AVStream *in_stream = in_avformat_ctx->streams[pkt->stream_index]; 85 | AVStream *out_stream = out_avformat_ctx->streams[pkt->stream_index]; 86 | //AVStream *in_stream = iter->second.in; 87 | //AVStream *out_stream = iter->second.out; 88 | pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 89 | pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 90 | pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base); 91 | pkt->pos = -1; 92 | 93 | ret = av_interleaved_write_frame(out_avformat_ctx, pkt); 94 | if (ret < 0) { 95 | fprintf(stderr, "Error muxing packet\n"); 96 
| break; 97 | } 98 | av_packet_unref(pkt); 99 | } 100 | 101 | //ret = av_interleaved_write_frame(out_avformat_ctx, nullptr); 102 | // if (ret < 0) { 103 | // fprintf(stderr, "Error muxing packet\n"); 104 | // } 105 | 106 | av_write_trailer(out_avformat_ctx); 107 | 108 | av_packet_free(&pkt); 109 | 110 | ret = 0; 111 | end: 112 | avformat_close_input(&in_avformat_ctx); 113 | 114 | /* close output */ 115 | if (out_avformat_ctx && !(out_avformat_ctx->flags & AVFMT_NOFILE)) 116 | avio_closep(&out_avformat_ctx->pb); 117 | avformat_free_context(out_avformat_ctx); 118 | 119 | return ret; 120 | } 121 | #endif -------------------------------------------------------------------------------- /src/resample_audio_test.h: -------------------------------------------------------------------------------- 1 | #ifndef RESAMPLE_AUDIO_TEST_H_H_ 2 | #define RESAMPLE_AUDIO_TEST_H_H_ 3 | 4 | #include "global.h" 5 | #include "codecimpl.h" 6 | #include 7 | #include "audio_convert_tool.h" 8 | #include "avframe_util.h" 9 | 10 | 11 | /* check that a given sample format is supported by the encoder */ 12 | static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt) 13 | { 14 | const enum AVSampleFormat *p = codec->sample_fmts; 15 | 16 | while (*p != AV_SAMPLE_FMT_NONE) { 17 | if (*p == sample_fmt) 18 | return 1; 19 | p++; 20 | } 21 | return 0; 22 | } 23 | 24 | /* just pick the highest supported samplerate */ 25 | static int select_sample_rate(const AVCodec *codec) 26 | { 27 | const int *p; 28 | int best_samplerate = 0; 29 | 30 | if (!codec->supported_samplerates) 31 | return 44100; 32 | 33 | p = codec->supported_samplerates; 34 | while (*p) { 35 | if (!best_samplerate || abs(44100 - *p) < abs(44100 - best_samplerate)) 36 | best_samplerate = *p; 37 | p++; 38 | } 39 | return best_samplerate; 40 | } 41 | 42 | /* select layout with the highest channel count */ 43 | static int select_channel_layout(const AVCodec *codec) 44 | { 45 | const uint64_t *p; 46 | uint64_t best_ch_layout 
= 0; 47 | int best_nb_channels = 0; 48 | 49 | if (!codec->channel_layouts) 50 | return AV_CH_LAYOUT_STEREO; 51 | 52 | p = codec->channel_layouts; 53 | while (*p) { 54 | int nb_channels = av_get_channel_layout_nb_channels(*p); 55 | 56 | if (nb_channels > best_nb_channels) { 57 | best_ch_layout = *p; 58 | best_nb_channels = nb_channels; 59 | } 60 | p++; 61 | } 62 | return best_ch_layout; 63 | } 64 | 65 | 66 | /** 67 | * Initialize one input frame for writing to the output file. 68 | * The frame will be exactly frame_size samples large. 69 | * @param[out] frame Frame to be initialized 70 | * @param output_codec_context Codec context of the output file 71 | * @param frame_size Size of the frame 72 | * @return Error code (0 if successful) 73 | */ 74 | static int init_output_frame(AVFrame **frame, 75 | AVCodecContext *output_codec_context, 76 | int frame_size) 77 | { 78 | int error; 79 | 80 | /* Create a new frame to store the audio samples. */ 81 | if (!(*frame = av_frame_alloc())) { 82 | fprintf(stderr, "Could not allocate output frame\n"); 83 | return AVERROR_EXIT; 84 | } 85 | 86 | /* Set the frame's parameters, especially its size and format. 87 | * av_frame_get_buffer needs this to allocate memory for the 88 | * audio samples of the frame. 89 | * Default channel layouts based on the number of channels 90 | * are assumed for simplicity. */ 91 | (*frame)->nb_samples = frame_size; 92 | (*frame)->channel_layout = output_codec_context->channel_layout; 93 | (*frame)->format = output_codec_context->sample_fmt; 94 | (*frame)->sample_rate = output_codec_context->sample_rate; 95 | 96 | /* Allocate the samples of the created frame. This call will make 97 | * sure that the audio frame can hold as many samples as specified. 
*/ 98 | if ((error = av_frame_get_buffer(*frame, 0)) < 0) { 99 | fprintf(stderr, "Could not allocate output frame samples (error '%s')\n", 100 | av_err2str(error)); 101 | av_frame_free(frame); 102 | return error; 103 | } 104 | 105 | return 0; 106 | } 107 | 108 | 109 | int resample_audio_test(){ 110 | const char * inFileName = "./半壶纱.mp4"; 111 | AVFormatContext * avformatctx = nullptr; 112 | if(avformat_open_input(&avformatctx, inFileName, nullptr, nullptr)<0){ 113 | return -1; 114 | } 115 | avformat_find_stream_info(avformatctx, nullptr); 116 | AVCodecContext *audioContext = nullptr; 117 | int audio_stream_index, video_stream_index; 118 | for(int i=0; inb_streams; i++){ 119 | AVStream* stream = avformatctx->streams[i]; 120 | if(stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){ 121 | const AVCodec* pCodec = avcodec_find_decoder(stream->codecpar->codec_id); 122 | audioContext = avcodec_alloc_context3(pCodec); 123 | avcodec_parameters_to_context(audioContext, stream->codecpar); 124 | //av_codec_set_pkt_timebase(audioContext, stream->time_base); 125 | 126 | if(avcodec_open2(audioContext, pCodec, nullptr)<0){ 127 | return -1; 128 | } 129 | audio_stream_index = i; 130 | //break; 131 | } 132 | } 133 | 134 | const char* oFileName = "test.MP3"; 135 | AVFormatContext* oAVformatCtx = nullptr; 136 | if(avformat_alloc_output_context2(&oAVformatCtx, nullptr, nullptr, oFileName)<0){ 137 | return -1; 138 | } 139 | 140 | const AVCodec *codec = avcodec_find_encoder(oAVformatCtx->oformat->audio_codec); 141 | AVCodecContext* outputCodecCtx = avcodec_alloc_context3(codec); 142 | if (!outputCodecCtx) { 143 | fprintf(stderr, "Could not allocate audio codec context\n"); 144 | exit(1); 145 | } 146 | /* put sample parameters */ 147 | outputCodecCtx->bit_rate = 32000; 148 | /* check that the encoder supports s16 pcm input */ 149 | outputCodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP; 150 | if (!check_sample_fmt(codec, outputCodecCtx->sample_fmt)) { 151 | fprintf(stderr, "Encoder does not 
support sample format %s", 152 | av_get_sample_fmt_name(outputCodecCtx->sample_fmt)); 153 | exit(1); 154 | } 155 | /* select other audio parameters supported by the encoder */ 156 | outputCodecCtx->sample_rate = 16000;//select_sample_rate(codec); 157 | outputCodecCtx->channel_layout = select_channel_layout(codec); 158 | outputCodecCtx->channels = av_get_channel_layout_nb_channels(outputCodecCtx->channel_layout); 159 | outputCodecCtx->time_base = AVRational{1, outputCodecCtx->sample_rate}; 160 | 161 | /* open it */ 162 | if (avcodec_open2(outputCodecCtx, codec, NULL) < 0) { 163 | fprintf(stderr, "Could not open codec\n"); 164 | exit(1); 165 | } 166 | 167 | AVStream* oAudioStream = avformat_new_stream(oAVformatCtx, nullptr); 168 | if(oAudioStream==nullptr){ 169 | return -1; 170 | } 171 | //非常重要 172 | avcodec_parameters_from_context(oAudioStream->codecpar, outputCodecCtx); 173 | 174 | if(avio_open(&oAVformatCtx->pb, oFileName, AVIO_FLAG_READ_WRITE)<0){ 175 | return -1; 176 | } 177 | 178 | if(avformat_write_header(oAVformatCtx, nullptr)<0){ 179 | return -1; 180 | } 181 | 182 | std::shared_ptr swrCtxManager = nullptr; 183 | if(audioContext->sample_rate != outputCodecCtx->sample_rate || 184 | audioContext->channel_layout != outputCodecCtx->channel_layout || 185 | audioContext->sample_fmt != outputCodecCtx->sample_fmt){ //先做重采样再mp3编码 186 | 187 | swrCtxManager = std::make_shared(audioContext->channel_layout, audioContext->sample_rate, (AVSampleFormat)audioContext->sample_fmt, outputCodecCtx->channel_layout, outputCodecCtx->sample_rate, (AVSampleFormat)outputCodecCtx->sample_fmt); 188 | if(!swrCtxManager->Init()){ 189 | return -1; 190 | } 191 | } 192 | 193 | AVAudioFifo *fifo = av_audio_fifo_alloc(outputCodecCtx->sample_fmt, outputCodecCtx->channels, 1); 194 | 195 | 196 | auto encodeCallback = [&](AVCodecContext *ctx, const AVPacket* avpkt){ 197 | AVPacket* pkt = av_packet_clone(avpkt); 198 | pkt->stream_index = oAudioStream->index; 199 | 
av_interleaved_write_frame(oAVformatCtx, pkt); 200 | av_packet_unref(pkt); 201 | }; 202 | 203 | //是否读文件结束 204 | bool read_eof = false; 205 | 206 | FILE* pcmFile = fopen("./test.pcm", "wb"); 207 | 208 | auto callback = [&](AVCodecContext *ctx, const AVFrame* frame){ 209 | WritePCMToFile(frame, pcmFile); 210 | if(swrCtxManager != nullptr){ //转换 211 | int ret = swrCtxManager->Convert((const uint8_t**)frame->extended_data, frame->nb_samples);//wr_convert(swrCtxManager->swr_ctx, dst_data, dst_nb_samples, (const uint8_t**)frame->extended_data, frame->nb_samples); 212 | if(ret > 0){//add to Audio_FIFO 213 | av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + ret); 214 | av_audio_fifo_write(fifo, (void**)swrCtxManager->GetConvertedBuffer(), ret); 215 | } 216 | 217 | }else{//加入Audio_FIFO 218 | av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame->nb_samples); 219 | av_audio_fifo_write(fifo, (void**)frame->extended_data, frame->nb_samples); 220 | } 221 | 222 | int readsize = outputCodecCtx->frame_size; 223 | while(av_audio_fifo_size(fifo) >= readsize|| 224 | (read_eof && av_audio_fifo_size(fifo)> 0)){//当文件已经读结束,需要把没有凑成readsize的也要编码 225 | AVFrame *frame; 226 | const int frame_size = FFMIN(av_audio_fifo_size(fifo), readsize); 227 | init_output_frame(&frame, outputCodecCtx, frame_size); 228 | if (av_audio_fifo_read(fifo, (void **)frame->data, frame_size) < frame_size) { 229 | fprintf(stderr, "Could not read data from FIFO\n"); 230 | av_frame_free(&frame); 231 | return AVERROR_EXIT; 232 | } 233 | static int64_t pts = 0; 234 | frame->pts = pts; 235 | pts += frame->nb_samples; 236 | encode(outputCodecCtx, frame, encodeCallback); 237 | } 238 | }; 239 | 240 | AVPacket *packet = av_packet_alloc(); 241 | av_init_packet(packet); 242 | while(1){ 243 | int ret = av_read_frame(avformatctx, packet); 244 | if(ret!=0){ 245 | printf("read error or file end\n"); 246 | read_eof = true; 247 | break; 248 | } 249 | if(packet->stream_index==audio_stream_index){ 250 | decode(audioContext, 
packet, callback); 251 | } 252 | 253 | av_packet_unref(packet); 254 | } 255 | 256 | decode(audioContext, nullptr, callback); 257 | 258 | avcodec_close(audioContext); 259 | 260 | avformat_close_input(&avformatctx); 261 | 262 | av_packet_free(&packet); 263 | 264 | av_write_trailer(oAVformatCtx); 265 | 266 | fclose(pcmFile); 267 | 268 | return 0; 269 | } 270 | 271 | 272 | #endif -------------------------------------------------------------------------------- /src/separate_mp4_output_audio_video_mp4.h: -------------------------------------------------------------------------------- 1 | #ifndef SEPARATE_MP4_OUTPUT_AUDIO_VIDEO_MP4_H_H_ 2 | #define SEPARATE_MP4_OUTPUT_AUDIO_VIDEO_MP4_H_H_ 3 | 4 | #include "global.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | 12 | //分离mp4文件并分别保存音频mp4文件和视频mp4文件 13 | int separate_mp4_output_audio_video_mp4_test(){ 14 | const char * inFileName = "./半壶纱.mp4"; 15 | AVFormatContext * avformatctx; 16 | if(avformat_open_input(&avformatctx, inFileName, nullptr, nullptr)<0){ 17 | return -1; 18 | } 19 | avformat_find_stream_info(avformatctx, nullptr); 20 | 21 | const char* outVideoFileName = "./video_output.mp4"; 22 | const char* outAudioFileName = "./audio_output.mp4"; 23 | 24 | AVFormatContext* videoFormatCtx; 25 | AVFormatContext* audioFormatCtx; 26 | 27 | if(avformat_alloc_output_context2(&videoFormatCtx, nullptr, nullptr, outVideoFileName)){ 28 | return -1; 29 | } 30 | 31 | if(avformat_alloc_output_context2(&audioFormatCtx, nullptr, nullptr, outAudioFileName)){ 32 | return -1; 33 | } 34 | 35 | AVStream* audioStream = avformat_new_stream(audioFormatCtx, nullptr); 36 | AVStream* videoStream = avformat_new_stream(videoFormatCtx, nullptr); 37 | 38 | int audio_stream_index, video_stream_index; 39 | for(int i=0; inb_streams; i++){ 40 | AVStream* stream = avformatctx->streams[i]; 41 | if(stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){ 42 | video_stream_index = i; 43 | avcodec_parameters_copy(videoStream->codecpar, 
stream->codecpar); 44 | }else if(stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){ 45 | avcodec_parameters_copy(audioStream->codecpar, stream->codecpar); 46 | audio_stream_index = i; 47 | } 48 | } 49 | 50 | 51 | if(avio_open(&videoFormatCtx->pb, outVideoFileName, AVIO_FLAG_WRITE)<0){ 52 | return -1; 53 | } 54 | if(avio_open(&audioFormatCtx->pb, outAudioFileName, AVIO_FLAG_WRITE)<0){ 55 | return -1; 56 | } 57 | 58 | avformat_write_header(videoFormatCtx, nullptr); 59 | avformat_write_header(audioFormatCtx, nullptr); 60 | 61 | AVPacket* pkt = av_packet_alloc(); 62 | while(1){ 63 | int ret = av_read_frame(avformatctx, pkt); 64 | if(ret!=0){ 65 | break; 66 | } 67 | AVStream* in = avformatctx->streams[pkt->stream_index]; 68 | AVStream* out = nullptr; 69 | AVFormatContext* outCtx; 70 | if(pkt->stream_index == video_stream_index){ 71 | pkt->stream_index = 0; 72 | out = videoStream; 73 | outCtx = videoFormatCtx; 74 | }else if(pkt->stream_index == audio_stream_index){ 75 | pkt->stream_index = 0; 76 | out = audioStream; 77 | outCtx = audioFormatCtx; 78 | } 79 | int64_t pts = pkt->pts; 80 | int64_t dts = pkt->dts; 81 | int64_t dur = pkt->duration; 82 | pkt->pts = av_rescale_q_rnd(pkt->pts, in->time_base, out->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 83 | pkt->dts = av_rescale_q_rnd(pkt->dts, in->time_base, out->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 84 | pkt->duration = av_rescale_q(pkt->duration, in->time_base, out->time_base); 85 | pkt->pos = -1; 86 | //printf("old_pts:%d, pkt->pts:%ld, old_dts:%ld, pkt->dts:%ld, old_duration:%ld, pkt->duration:%ld\n", pts, pkt->pts, dts, pkt->dts, dur, pkt->duration); 87 | av_interleaved_write_frame(outCtx, pkt); 88 | av_packet_unref(pkt); 89 | } 90 | av_packet_free(&pkt); 91 | 92 | av_write_trailer(audioFormatCtx); 93 | av_write_trailer(videoFormatCtx); 94 | 95 | avio_close(videoFormatCtx->pb); 96 | avio_close(audioFormatCtx->pb); 97 | 98 | avformat_free_context(avformatctx); 99 | 
avformat_free_context(audioFormatCtx); 100 | avformat_free_context(videoFormatCtx); 101 | 102 | return 0; 103 | } 104 | 105 | #endif -------------------------------------------------------------------------------- /src/truecut_tcif.cpp: -------------------------------------------------------------------------------- 1 | #include "truecut_tcif.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | extern "C"{ 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | } 26 | 27 | using namespace HEIFPP; 28 | 29 | class FFmpegOutStream : public HEIF::OutputStreamInterface{ 30 | public: 31 | FFmpegOutStream(AVIOContext *s, const char* filename) 32 | :m_s(s), 33 | mFilename(filename){ 34 | } 35 | virtual void seekp(std::uint64_t aPos) override 36 | { 37 | avio_seek(m_s, aPos,SEEK_SET); 38 | } 39 | virtual std::uint64_t tellp() override 40 | { 41 | return avio_seek(m_s, 0, SEEK_CUR); 42 | } 43 | virtual void write(const void* aBuf, std::uint64_t aCount) override 44 | { 45 | avio_write(m_s, (const unsigned char*)aBuf, aCount); 46 | } 47 | virtual void remove() override 48 | { 49 | if (!mFilename.empty()) 50 | { 51 | avpriv_io_delete(mFilename.c_str()); 52 | } 53 | } 54 | private: 55 | AVIOContext *m_s; 56 | std::string mFilename; 57 | }; 58 | 59 | 60 | static bool parseNalu(const uint8_t* data, uint64_t datalen, HEIF::Array& decoderSpecificInfo, HEIF::Array& hevcData ){ 61 | NAL_State d; 62 | d.init_parse(data, datalen); 63 | int flags = 0; 64 | for(;;){ 65 | const std::uint8_t* nal_data = nullptr; 66 | std::uint64_t nal_len = 0; 67 | if (!d.parse_byte_stream(nal_data, nal_len)) 68 | { 69 | break; 70 | } 71 | int type; 72 | type = (nal_data[0] >> 1) & 0x3f; 73 | if(( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::PREFIX_SEI_NUT || ( HEIF::DecoderSpecInfoType)type == 
HEIF::DecoderSpecInfoType::SUFFIX_SEI_NUT){ 74 | continue; 75 | } 76 | if(( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_VPS ||( HEIF::DecoderSpecInfoType) type == HEIF::DecoderSpecInfoType::HEVC_SPS || ( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_PPS){ 77 | std::uint32_t index = 0; 78 | if (( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_VPS) 79 | index = 0; 80 | else if (( HEIF::DecoderSpecInfoType)type == HEIF::DecoderSpecInfoType::HEVC_SPS) 81 | index = 1; 82 | else 83 | index = 2; 84 | flags |= 1u << index; 85 | decoderSpecificInfo[index].decSpecInfoType = ( HEIF::DecoderSpecInfoType)type; 86 | decoderSpecificInfo[index].decSpecInfoData = HEIF::Array(nal_len + 4); 87 | decoderSpecificInfo[index].decSpecInfoData[0] = decoderSpecificInfo[index].decSpecInfoData[1] = 88 | decoderSpecificInfo[index].decSpecInfoData[2] = 0; 89 | decoderSpecificInfo[index].decSpecInfoData[3] = 1; 90 | std::memcpy(decoderSpecificInfo[index].decSpecInfoData.elements + 4, nal_data, nal_len); 91 | }else if( type == 16 || type == 17 || type == 18 || 92 | type == 19 || type == 20 || type == 21 ){ 93 | hevcData = HEIF::Array(nal_len + 4); 94 | hevcData[0] = hevcData[1] = hevcData[2] = 0; 95 | hevcData[3] = 1; 96 | std::memcpy(hevcData.elements + 4, nal_data, nal_len); 97 | }else{ 98 | return false; 99 | } 100 | 101 | } 102 | 103 | if (flags > 0 && flags != 7 ) 104 | { 105 | return false; 106 | } 107 | return true; 108 | } 109 | 110 | typedef struct TCIFHandle{ 111 | Heif* heif; 112 | uint32_t width; 113 | uint32_t height; 114 | int flag; 115 | uint32_t timeScale; 116 | uint64_t duration; 117 | HEIF::Array* decoderSpecificInfo; 118 | bool specificInfo; 119 | std::vector> *hevcDatas; 120 | }TCIFHandle; 121 | 122 | TCIFHandle* createHandle(const uint8_t* extradata, uint64_t extradata_size, uint32_t width, uint32_t height){ 123 | TCIFHandle* handle = (TCIFHandle*)malloc(sizeof(TCIFHandle)); 124 | handle->hevcDatas = new 
std::vector>(); 125 | handle->width = width; 126 | handle->height = height; 127 | Heif *heif = new Heif(); 128 | heif->setMajorBrand("msf1"); 129 | heif->addCompatibleBrand(HEIF::FourCC("heic")); 130 | heif->addCompatibleBrand(HEIF::FourCC("hevc")); 131 | heif->addCompatibleBrand(HEIF::FourCC("mif1")); 132 | heif->addCompatibleBrand(HEIF::FourCC("iso8")); 133 | heif->addCompatibleBrand(HEIF::FourCC("mp41")); 134 | handle->heif = heif; 135 | handle->decoderSpecificInfo = new HEIF::Array(3); 136 | handle->specificInfo = false; 137 | if(extradata != NULL && extradata_size >0){ 138 | HEIF::Array hevcData; 139 | if(!parseNalu(extradata, extradata_size, *handle->decoderSpecificInfo, hevcData)){ 140 | return NULL; 141 | } 142 | handle->specificInfo = true; 143 | } 144 | return handle; 145 | } 146 | int configHandle(TCIFHandle* handle, int flag, uint32_t timeScale, uint64_t duration){ 147 | if(handle == NULL){ 148 | return -1; 149 | } 150 | handle->flag = flag; 151 | handle->timeScale = timeScale; 152 | handle->duration = duration; 153 | return 0; 154 | } 155 | /** 156 | * delete tcif handle 157 | */ 158 | void deleteHandle(TCIFHandle* handle){ 159 | if(handle != NULL){ 160 | if(handle->heif != NULL){ 161 | delete handle->heif; 162 | handle->heif = NULL; 163 | } 164 | if(handle->decoderSpecificInfo != NULL){ 165 | delete handle->decoderSpecificInfo; 166 | handle->decoderSpecificInfo = NULL; 167 | } 168 | if(handle->hevcDatas != NULL){ 169 | handle->hevcDatas->clear(); 170 | delete handle->hevcDatas; 171 | handle->hevcDatas = NULL; 172 | } 173 | free(handle); 174 | handle = NULL; 175 | } 176 | } 177 | /** 178 | * save tcif 179 | */ 180 | int saveImage(TCIFHandle* handle, AVIOContext* avioContext, const char* url){ 181 | if(handle == NULL || handle->heif == NULL || !handle->specificInfo || handle->hevcDatas->size() == 0){ 182 | return -1; 183 | } 184 | Heif* heif = handle->heif; 185 | DecoderConfig* config = new HEVCDecoderConfiguration(heif); 186 | 
config->setConfig(*handle->decoderSpecificInfo); 187 | switch(handle->flag){ 188 | case TCIF_SINGLE_IMAGE_FLAG:{//single frame 189 | HEVCCodedImageItem* imageItem = new HEVCCodedImageItem(heif); 190 | imageItem->setSize(handle->width, handle->height); 191 | imageItem->setDecoderConfiguration(config); 192 | imageItem->setItemData(handle->hevcDatas->at(0).elements, handle->hevcDatas->at(0).size); 193 | heif->setPrimaryItem(imageItem); 194 | break; 195 | } 196 | case TCIF_MULTIPLE_IMAGE_FLAG:{// more than one frame 197 | for(std::size_t i = 0; ihevcDatas->size(); i++){ 198 | HEVCCodedImageItem* imageItem = new HEVCCodedImageItem(heif); 199 | imageItem->setSize(handle->width, handle->height); 200 | imageItem->setDecoderConfiguration(config); 201 | imageItem->setItemData(handle->hevcDatas->at(i).elements, handle->hevcDatas->at(i).size); 202 | if(heif->getPrimaryItem() == NULL){ 203 | heif->setPrimaryItem(imageItem); 204 | } 205 | } 206 | break; 207 | } 208 | case TCIF_VIDEO_FLAG:{//video 209 | HEVCCodedImageItem* imageItem = new HEVCCodedImageItem(heif); 210 | imageItem->setSize(handle->width, handle->height); 211 | imageItem->setDecoderConfiguration(config); 212 | imageItem->setItemData(handle->hevcDatas->at(0).elements, handle->hevcDatas->at(0).size); 213 | heif->setPrimaryItem(imageItem); 214 | VideoTrack* videoTrack = new VideoTrack(heif); 215 | videoTrack->setTimescale(handle->timeScale); 216 | for (std::size_t i = 0; i < handle->hevcDatas->size(); ++i) { 217 | HEIFPP::VideoSample* imageSeqSample = new HEIFPP::VideoSample(heif); 218 | imageSeqSample->setType(HEIF::FourCC("hvc1")); 219 | imageSeqSample->setDecoderConfiguration(config); 220 | imageSeqSample->setItemData(handle->hevcDatas->at(i).elements, handle->hevcDatas->at(i).size); 221 | imageSeqSample->setDuration(handle->duration); 222 | videoTrack->addSample(imageSeqSample); 223 | } 224 | break; 225 | } 226 | default: 227 | return -1; 228 | } 229 | HEIF::OutputStreamInterface* out = new 
FFmpegOutStream(avioContext, url); 230 | HEIFPP::Result r = heif->save(out); 231 | delete out; 232 | 233 | return (r == Result::OK ? 0 : -1); 234 | } 235 | 236 | /** 237 | * add avpacket into tcif 238 | */ 239 | int addAVPacket(TCIFHandle* handle, AVPacket* avpkt){ 240 | if(handle == NULL || handle->hevcDatas == NULL || avpkt == NULL){ 241 | return -1; 242 | } 243 | HEIF::Array hevcData; 244 | if(!handle->specificInfo || (avpkt->flags & AV_PKT_FLAG_KEY)){ 245 | if(!parseNalu(avpkt->data, avpkt->size, *handle->decoderSpecificInfo, hevcData)){ 246 | av_log(NULL, AV_LOG_ERROR, "parse nulu error!"); 247 | return -1; 248 | } 249 | handle->specificInfo = true; 250 | }else{ 251 | hevcData = HEIF::Array(avpkt->size); 252 | std::memcpy(hevcData.elements, avpkt->data, avpkt->size); 253 | } 254 | handle->hevcDatas->push_back(hevcData); 255 | return 0; 256 | } -------------------------------------------------------------------------------- /src/truecut_tcif.h: -------------------------------------------------------------------------------- 1 | #ifndef TRUECUT_TCIF_H_H_ 2 | 3 | #include 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | typedef struct TCIFHandle TCIFHandle; 10 | typedef struct AVIOContext AVIOContext; 11 | typedef struct AVPacket AVPacket; 12 | 13 | /** 14 | * create tcif handle 15 | */ 16 | TCIFHandle* createHandle(const uint8_t* extradata, uint64_t extradata_size, uint32_t width, uint32_t height); 17 | 18 | #define TCIF_SINGLE_IMAGE_FLAG 0 //single frame 19 | #define TCIF_MULTIPLE_IMAGE_FLAG 1 //more than one frame 20 | #define TCIF_VIDEO_FLAG 2 //video 21 | /** 22 | * configure param 23 | */ 24 | int configHandle(TCIFHandle* handle, int flag, uint32_t timeScale, uint64_t duration); 25 | /** 26 | * delete tcif handle 27 | */ 28 | void deleteHandle(TCIFHandle* handle); 29 | /** 30 | * save tcif 31 | */ 32 | int saveImage(TCIFHandle* handle, AVIOContext* avioContext, const char* url); 33 | 34 | /** 35 | * add avpacket into tcif 36 | */ 37 | int 
addAVPacket(TCIFHandle* handle, AVPacket* avpkt); 38 | 39 | #ifdef __cplusplus 40 | } 41 | #endif 42 | 43 | #endif -------------------------------------------------------------------------------- /src/video_avfilter_test.h: -------------------------------------------------------------------------------- 1 | #ifndef VIDEO_AVFILTER_TEST_H_H_ 2 | #define VIDEO_AVFILTER_TEST_H_H_ 3 | 4 | #include 5 | #include "global.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "avframe_util.h" 11 | 12 | 13 | int video_avfilter_test(){ 14 | 15 | FILE* rFile = fopen("./akiyo_cif.yuv","rb"); 16 | if(rFile == NULL) 17 | return -1; 18 | FILE* wFile = fopen("./akiyo_qcif.yuv","wb"); 19 | 20 | int width = 352; 21 | int height = 288; 22 | 23 | //创建filtergraph 24 | AVFilterGraph *filter_graph = avfilter_graph_alloc(); 25 | /*************************************************buffer过滤器***************************************************/ 26 | //根据名字获取buffer过滤器 27 | const AVFilter *buffersrc = avfilter_get_by_name("buffer"); 28 | AVFilterContext* buffersrc_ctx; //每一个avfilter都会有一个设备上下文与之对应 29 | char in_args[512]; 30 | AVPixelFormat pix_fmts = AV_PIX_FMT_YUV420P; 31 | snprintf(in_args, sizeof(in_args),"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", width, height, pix_fmts, 1, 25, 1, 1); 32 | //给buffersrc过滤器传入参数,“in”这个名字实质上是给buffersrc_ctx起了个名字,便于filtergraph管理和定位 33 | //当前是视频数据做为源数据,所以当前的参数如上,具体:buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1 34 | //buffer=video_size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1 35 | avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", in_args, NULL, filter_graph); 36 | /*************************************************buffersink过滤器***************************************************/ 37 | //根据名字获取buffersink过滤器,buffersink过滤器是输出过滤后的数据,如缩放后的yuv420p数据 38 | const AVFilter *buffersink = avfilter_get_by_name("buffersink"); 39 | //输出数据的格式设置 40 | AVBufferSinkParams *buffersink_params = 
av_buffersink_params_alloc(); 41 | AVPixelFormat out_pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE}; 42 | buffersink_params->pixel_fmts = out_pix_fmts; 43 | AVFilterContext* buffersink_ctx; 44 | avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, buffersink_params, filter_graph); 45 | av_free(buffersink_params); 46 | /**********************************************scale过滤器**********************************************************/ 47 | //根据名字获取scale过滤器,这个过滤器ffmpeg库已经实现了这个过滤器,其内部实现和swscale库实现一样 48 | const AVFilter *scalefilter = avfilter_get_by_name("scale"); 49 | AVFilterContext* scalefilter_ctx; 50 | char scale_args[512]; 51 | snprintf(scale_args, sizeof(scale_args), "%d:%d", width/2, height/2);//参数是如scale=128:64 52 | avfilter_graph_create_filter(&scalefilter_ctx, scalefilter, "resize", scale_args, NULL, filter_graph); 53 | /********************************************转换frame格式********************************************************/ 54 | const AVFilter *pixfmtfilter = avfilter_get_by_name("format"); 55 | AVFilterContext* pixfmtfilter_ctx; 56 | char pixfmt_args[512]; 57 | snprintf(pixfmt_args, sizeof(pixfmt_args), "pix_fmts=%d", AV_PIX_FMT_YUVJ420P);//format=pix_fmts=yuv420p 58 | avfilter_graph_create_filter(&pixfmtfilter_ctx, pixfmtfilter, "format", pixfmt_args, NULL, filter_graph); 59 | /**********************************************各个设备上下文链接*************************************************************************/ 60 | int ret = avfilter_link(buffersrc_ctx, 0, scalefilter_ctx, 0); 61 | ret = avfilter_link(scalefilter_ctx, 0, pixfmtfilter_ctx, 0); 62 | ret = avfilter_link(pixfmtfilter_ctx, 0, buffersink_ctx, 0); 63 | /*****************************************到此为止已经将各个filter串连起来***********************************************/ 64 | //到此为止,过滤器的图初始化完毕 65 | avfilter_graph_config(filter_graph, NULL); 66 | 67 | // int ret = 0; 68 | 69 | // //new一个pin,并与buffer过滤器设备上正文相关联 70 | // AVFilterInOut *outputs = 
avfilter_inout_alloc(); 71 | // outputs->name = av_strdup("in"); 72 | // outputs->filter_ctx = buffersrc_ctx; 73 | // outputs->pad_idx = 0; 74 | // outputs->next = NULL; 75 | // //new一个pin,并与buffersink过滤器设备上正文相关联 76 | // AVFilterInOut *inputs = avfilter_inout_alloc(); 77 | // inputs->name = av_strdup("out"); 78 | // inputs->filter_ctx = buffersink_ctx; 79 | // inputs->pad_idx = 0; 80 | // inputs->next = NULL; 81 | 82 | // char scale_args[512]; 83 | // snprintf(scale_args, sizeof(scale_args), "scale=%d:%d", width/2, height/2);//参数是如scale=128:64 84 | // //在两个pin之间插入一个字符串描述的过滤器,如上的scale绽放过滤器 85 | // avfilter_graph_parse_ptr(filter_graph, scale_args, &inputs, &outputs, NULL); 86 | // //到此为止,过滤器的图初始化完毕 87 | // avfilter_graph_config(filter_graph, NULL); 88 | 89 | AVFrame* inframe = av_frame_alloc(); 90 | AVFrame *filt_frame = av_frame_alloc(); 91 | 92 | while(1){ 93 | if(feof(rFile)) 94 | break; 95 | 96 | inframe->format = pix_fmts; 97 | inframe->width = width; 98 | inframe->height = height; 99 | av_frame_get_buffer(inframe, 32); 100 | 101 | ReadYUV420FromFile(inframe, rFile);//从yuv文件填充AVFrame 102 | //向buffer设备上下文填充视频帧 103 | if (av_buffersrc_add_frame_flags(buffersrc_ctx, inframe, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { 104 | av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); 105 | av_frame_unref(inframe); 106 | break; 107 | } 108 | while (1) {//从buffersink设备上下文获取视频帧 109 | ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); 110 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) 111 | break; 112 | if (ret < 0) 113 | break; 114 | //WriteYUV420ToFile(filt_frame, wFile);//将处理后的AVFrame写入到文件 115 | 116 | av_frame_unref(filt_frame); 117 | 118 | } 119 | av_frame_unref(inframe); 120 | } 121 | 122 | av_frame_free(&inframe); 123 | av_frame_free(&filt_frame); 124 | 125 | avfilter_graph_free(&filter_graph); 126 | 127 | fclose(rFile); 128 | fclose(wFile); 129 | 130 | return 0; 131 | } 132 | 133 | #endif 134 | 135 | 136 | 
-------------------------------------------------------------------------------- /src/video_filter_tool.h: -------------------------------------------------------------------------------- 1 | #ifndef VIDEO_FILTER_TOOL_H_H_ 2 | #define VIDEO_FILTER_TOOL_H_H_ 3 | 4 | #include "global.h" 5 | 6 | /** 7 | * 视频帧过滤器 8 | */ 9 | class VideoFilterManager{ 10 | public: 11 | VideoFilterManager(){ 12 | filter_graph = avfilter_graph_alloc(); 13 | } 14 | ~VideoFilterManager(){ 15 | if(filter_graph){ 16 | avfilter_graph_free(&filter_graph); 17 | filter_graph = NULL; 18 | } 19 | } 20 | /** 21 | * 创建视频入口过滤器 22 | * @filter: 参数字符串,如video_size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1 23 | */ 24 | AVFilterContext* CreateBufferFilter(const char* filter, const char* name){ 25 | const AVFilter *buffersrc = avfilter_get_by_name("buffer"); 26 | return CreateFilterContext(buffersrc, name, filter); 27 | } 28 | /** 29 | * 创建视频帧出口过滤器 30 | * @pix_fmts: 输出的视频帧格式 31 | */ 32 | AVFilterContext* CreateBufferSinkFilter(AVPixelFormat *pix_fmts, const char* name){ 33 | const AVFilter *buffersink = avfilter_get_by_name("buffersink"); 34 | //输出数据的格式设置 35 | AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); 36 | buffersink_params->pixel_fmts = pix_fmts; 37 | AVFilterContext* ctx = CreateFilterContext(buffersink, name, NULL, buffersink_params); 38 | av_free(buffersink_params); 39 | return ctx; 40 | } 41 | /* 42 | *创建AVFilterContext 43 | *@filter: filter对象 44 | *@name: filter对应的名字 45 | *@filter_descr: filter参数字符串 46 | *@opaque: 其他参数 47 | */ 48 | AVFilterContext* CreateFilterContext(const AVFilter* filter, const char* name, const char* filter_descr, void *opaque = NULL){ 49 | AVFilterContext* ctx; 50 | avfilter_graph_create_filter(&ctx, filter, name, filter_descr, opaque, filter_graph); 51 | return ctx; 52 | } 53 | //插入过滤器 54 | int InsertFilter(AVFilterInOut* inputs, AVFilterInOut* outputs, const char* filter_descr){ 55 | return avfilter_graph_parse_ptr(filter_graph, filter_descr, 
&inputs, &outputs, nullptr); 56 | } 57 | /** 58 | * 配置filter, 完成过滤器的连通 59 | */ 60 | bool FilterConfig(){ 61 | if(avfilter_graph_config(filter_graph, NULL) < 0) 62 | return false; 63 | return true; 64 | } 65 | /** 66 | * 向入口过滤器输入视频帧数据 67 | * @frame: 输入数据 68 | */ 69 | int AddFrame(AVFilterContext* buf_src_ctx, AVFrame* frame){ 70 | if (av_buffersrc_add_frame_flags(buf_src_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { 71 | av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); 72 | return -1; 73 | } 74 | return 0; 75 | } 76 | /** 77 | * 获取过滤后的视频帧数据 78 | * @frame画出数据 79 | */ 80 | int GetFrame(AVFilterContext* buf_sink_ctx, AVFrame* frame){ 81 | return av_buffersink_get_frame(buf_sink_ctx, frame); 82 | } 83 | private: 84 | AVFilterGraph *filter_graph; 85 | }; 86 | 87 | #endif -------------------------------------------------------------------------------- /src/yuv_transfer_test.h: -------------------------------------------------------------------------------- 1 | #include "pw_truecut_yuv_helper.h" 2 | #include "video_filter_tool.h" 3 | #include 4 | #include 5 | #include "libyuv.h" 6 | 7 | #define YUV10BIT 1 8 | 9 | void MergeUVRow(const uint8_t* src_u, 10 | const uint8_t* src_v, 11 | uint8_t* dst_uv, 12 | int width, int depth){ 13 | if(depth <= 8){ 14 | int x; 15 | for (x = 0; x < width - 1; x += 2) { 16 | dst_uv[0] = src_u[x]; 17 | dst_uv[1] = src_v[x]; 18 | dst_uv[2] = src_u[x + 1]; 19 | dst_uv[3] = src_v[x + 1]; 20 | dst_uv += 4; 21 | } 22 | if (width & 1) { 23 | dst_uv[0] = src_u[width - 1]; 24 | dst_uv[1] = src_v[width - 1]; 25 | } 26 | }else{ 27 | int x; 28 | for (x = 0; x < 2*(width - 1); x += 4) { 29 | //16bit u 30 | dst_uv[0] = src_u[x]; 31 | dst_uv[1] = src_u[x+1]; 32 | //16bit v 33 | dst_uv[2] = src_v[x]; 34 | dst_uv[3] = src_v[x+1]; 35 | //16bit u 36 | dst_uv[4] = src_u[x + 2]; 37 | dst_uv[5] = src_u[x + 3]; 38 | //16bit v 39 | dst_uv[6] = src_v[x + 2]; 40 | dst_uv[7] = src_v[x + 3]; 41 | 42 | dst_uv += 8; 43 | } 44 | //width is odd number 45 
/**
 * Interleave one row of planar U and V samples into a packed UV row.
 * For depth <= 8 each sample is one byte (U0 V0 U1 V1 ...); for higher
 * depths each sample is two little-endian bytes and the byte pairs are
 * interleaved sample-by-sample. An odd width gets a trailing single-sample
 * fixup in both paths.
 */
void MergeUVRow(const uint8_t* src_u,
                const uint8_t* src_v,
                uint8_t* dst_uv,
                int width, int depth){
    if(depth <= 8){
        // 8-bit: two samples per iteration, four output bytes.
        int col = 0;
        uint8_t* out = dst_uv;
        while (col + 1 < width) {
            out[0] = src_u[col];
            out[1] = src_v[col];
            out[2] = src_u[col + 1];
            out[3] = src_v[col + 1];
            out += 4;
            col += 2;
        }
        if (width & 1) {
            out[0] = src_u[width - 1];
            out[1] = src_v[width - 1];
        }
    }else{
        // >8-bit: every sample occupies two bytes; copy a pair of samples
        // (eight output bytes) per iteration.
        int col = 0;
        uint8_t* out = dst_uv;
        while (col + 1 < width) {
            const int b = 2 * col;           // byte offset of sample `col`
            out[0] = src_u[b];               // U[col], low/high byte pair
            out[1] = src_u[b + 1];
            out[2] = src_v[b];               // V[col]
            out[3] = src_v[b + 1];
            out[4] = src_u[b + 2];           // U[col+1]
            out[5] = src_u[b + 3];
            out[6] = src_v[b + 2];           // V[col+1]
            out[7] = src_v[b + 3];
            out += 8;
            col += 2;
        }
        // Odd width: emit the final lone sample.
        if (width & 1) {
            const int b = 2 * (width - 1);
            out[0] = src_u[b];
            out[1] = src_u[b + 1];
            out[2] = src_v[b];
            out[3] = src_v[b + 1];
        }
    }
}


/**
 * Interleave full U and V planes into a packed UV plane, row by row.
 * Strides are in bytes; `width` is in samples and `height` in rows.
 */
void MergeUVPlane(const uint8_t* src_u,
                  int src_stride_u,
                  const uint8_t* src_v,
                  int src_stride_v,
                  uint8_t* dst_uv,
                  int dst_stride_uv,
                  int width,
                  int height,
                  int depth){
    const uint8_t* u_row = src_u;
    const uint8_t* v_row = src_v;
    uint8_t* uv_row = dst_uv;
    for (int row = 0; row < height; ++row) {
        // Merge one row of U and V into one row of packed UV.
        MergeUVRow(u_row, v_row, uv_row, width, depth);
        u_row += src_stride_u;
        v_row += src_stride_v;
        uv_row += dst_stride_uv;
    }
}
src_yuv_size, rFile); 121 | 122 | switch(format){ 123 | case IMAGE_CSP_NV12:{ 124 | int uv_size = in_linesize * height >> 2; 125 | uint8_t *src_u = new uint8_t[uv_size]; 126 | memcpy(src_u, src + in_linesize * height, uv_size); 127 | uint8_t *src_v = new uint8_t[uv_size]; 128 | memcpy(src_v, src + in_linesize * height + uv_size, uv_size); 129 | int src_stride_u = in_linesize >> 1; 130 | int src_stride_v = in_linesize >> 1; 131 | uint8_t *dst_uv = src + in_linesize * height; 132 | int dst_stride_uv = in_linesize; 133 | 134 | MergeUVPlane(src_u, src_stride_u, src_v, src_stride_v, dst_uv, dst_stride_uv, 135 | width, height >> 1, depth); 136 | 137 | copyValidYUVDataAndToYUV444(src, dst, linesize, top, left, width, height, in_linesize, in_vlinesize, format, depth); 138 | delete []src_u; 139 | delete []src_v; 140 | break; 141 | } 142 | case IMAGE_CSP_YV12:{ 143 | int uv_size = in_linesize * height >> 2; 144 | uint8_t *temp = new uint8_t[uv_size]; 145 | memcpy(temp, src + in_linesize * height, uv_size);//u 146 | memcpy(src + in_linesize * height, src + in_linesize * height + uv_size, uv_size); //v copy u's postion 147 | memcpy(src + in_linesize * height + uv_size, temp, uv_size); 148 | copyValidYUVDataAndToYUV444(src, dst, linesize, top, left, width, height, in_linesize, in_vlinesize, format, depth); 149 | delete[] temp; 150 | break; 151 | } 152 | case IMAGE_CSP_YUV422:{ 153 | int uv_size = in_linesize * height >> 1; 154 | uint8_t *temp = new uint8_t[in_linesize * height * 2]; 155 | memcpy(temp, src, in_linesize * height);//copy y 156 | uint8_t* dst_u = temp + in_linesize * height; 157 | uint8_t* dst_v = dst_u + uv_size; 158 | uint8_t* src_u = src + in_linesize*height; 159 | uint8_t* src_v = src_u + (in_linesize * height >> 2); 160 | for(int i = 0; i < height; i += 2){ 161 | memcpy(dst_u + i*in_linesize/2, src_u + i/2*in_linesize/2, in_linesize/2); 162 | memcpy(dst_u + (i+1)*in_linesize/2, src_u + i/2*in_linesize/2, in_linesize/2); 163 | memcpy(dst_v + i*in_linesize/2, 
src_v + i/2*in_linesize/2, in_linesize/2); 164 | memcpy(dst_v + (i+1)*in_linesize/2, src_v + i/2*in_linesize/2, in_linesize/2); 165 | } 166 | //fwrite(temp, 1, in_linesize * height * 2, ooFile); 167 | copyValidYUVDataAndToYUV444(temp, dst, linesize, top, left, width, height, in_linesize, in_vlinesize, format, depth); 168 | delete[] temp; 169 | break; 170 | } 171 | case IMAGE_CSP_YUV444F:{ 172 | uint8_t *temp = new uint8_t[in_linesize * height * 3]; 173 | memcpy(temp, src, in_linesize * height);//copy y 174 | uint8_t* dst_u = temp + in_linesize * height; 175 | uint8_t* dst_v = dst_u + in_linesize * height; 176 | uint8_t* src_u = src + in_linesize*height; 177 | uint8_t* src_v = src_u + (in_linesize * height >> 2); 178 | int size = depth > 8 ? 2 : 1; 179 | for(int i = 0; i < height; i += 2){ 180 | for(int j = 0; j < width; j += 2){ 181 | memcpy(dst_u + i*in_linesize + j*size, src_u + i/2*in_linesize/2 + j/2*size, size); 182 | memcpy(dst_u + i*in_linesize + (j + 1) * size, src_u + i/2*in_linesize/2 + j/2*size, size); 183 | memcpy(dst_u + (i + 1) * in_linesize + j*size, src_u + i/2*in_linesize/2 + j/2*size, size); 184 | memcpy(dst_u + (i + 1) * in_linesize + (j + 1) * size, src_u + i/2*in_linesize/2 + j/2*size, size); 185 | 186 | memcpy(dst_v + i*in_linesize + j*size, src_v + i/2*in_linesize/2 + j/2*size, size); 187 | memcpy(dst_v + i*in_linesize + (j + 1) * size, src_v + i/2*in_linesize/2 + j/2*size, size); 188 | memcpy(dst_v + (i + 1) * in_linesize + j*size, src_v + i/2*in_linesize/2 + j/2*size, size); 189 | memcpy(dst_v + (i + 1) * in_linesize + (j + 1) * size, src_v + i/2*in_linesize/2 + j/2*size, size); 190 | } 191 | } 192 | // fwrite(temp, 1, in_linesize * height * 3, ooFile); 193 | copyValidYUVDataAndToYUV444(temp, dst, linesize, top, left, width, height, in_linesize, in_vlinesize, format, depth); 194 | 195 | //bool copyYUV444(void* in, void* dst, int linesize, int top, int left, int w, int h, int o_linesize, int o_vlinesize, int oformat, int depth) 196 | int 
datasize = depth > 8 ? 2 : 1; 197 | int o_linesize = (width + 10) * datasize; 198 | int o_vlinesize = (height + 10); 199 | int buffer_size = o_linesize * o_vlinesize * 2; 200 | int oformat = IMAGE_CSP_YUV422; 201 | uint8_t* YUVBuffer = new uint8_t[buffer_size]; 202 | memset(YUVBuffer, 0, buffer_size); 203 | copyYUV444(temp, YUVBuffer, in_linesize, 5, 5, width, height, o_linesize, o_vlinesize, oformat, depth); 204 | fwrite(YUVBuffer, 1, buffer_size, ooFile); 205 | delete[] temp; 206 | break; 207 | } 208 | case IMAGE_CSP_V210:{ 209 | int uv_size = in_linesize * height >> 1; 210 | uint8_t *temp = new uint8_t[in_linesize * height * 2]; 211 | memcpy(temp, src, in_linesize * height);//copy y 212 | uint8_t* dst_u = temp + in_linesize * height; 213 | uint8_t* dst_v = dst_u + uv_size; 214 | uint8_t* src_u = src + in_linesize*height; 215 | uint8_t* src_v = src_u + (in_linesize * height >> 2); 216 | for(int i = 0; i < height; i += 2){ 217 | memcpy(dst_u + i*in_linesize/2, src_u + i/2*in_linesize/2, in_linesize/2); 218 | memcpy(dst_u + (i+1)*in_linesize/2, src_u + i/2*in_linesize/2, in_linesize/2); 219 | memcpy(dst_v + i*in_linesize/2, src_v + i/2*in_linesize/2, in_linesize/2); 220 | memcpy(dst_v + (i+1)*in_linesize/2, src_v + i/2*in_linesize/2, in_linesize/2); 221 | } 222 | 223 | uint8_t * UYVYBuffer = new uint8_t[in_linesize * height * 2]; 224 | uint8_t *tempDest = UYVYBuffer; 225 | int size = depth > 8 ? 
2 : 1; 226 | uint8_t* dst_y = temp; 227 | for(int h = 0; h < height; h++){ 228 | for(int w = 0, k = 0; w < width; w += 2, k += 4){ 229 | memcpy(tempDest + k * size, dst_u + h * in_linesize / 2 + w/2 * size, size); 230 | memcpy(tempDest + (k + 1) * size, dst_y + h * in_linesize + w * size, size); 231 | memcpy(tempDest + (k + 2) * size, dst_v + h * in_linesize / 2 + w/2 * size, size); 232 | memcpy(tempDest + (k + 3) * size, dst_y + h * in_linesize + (w + 1) * size, size); 233 | } 234 | tempDest += (in_linesize * 2); 235 | } 236 | fwrite(UYVYBuffer, 1, in_linesize * height * 2, ooFile); 237 | copyValidYUVDataAndToYUV444(UYVYBuffer, dst, linesize, top, left, width, height, in_linesize * 2, in_vlinesize, format, depth); 238 | break; 239 | } 240 | default: 241 | copyValidYUVDataAndToYUV444(src, dst, linesize, top, left, width, height, in_linesize, in_vlinesize, format, depth); 242 | break; 243 | } 244 | fwrite(dst, 1, dst_yuv_size, oFile); 245 | } 246 | 247 | fclose(rFile); 248 | fclose(oFile); 249 | fclose(ooFile); 250 | 251 | delete[] src; 252 | delete[] dst; 253 | 254 | return 0; 255 | } --------------------------------------------------------------------------------