├── hehuoren.flv ├── screen_capture.c ├── metadata.c ├── Makefile ├── cut_yuv_frame.c ├── container_parse.c ├── scaling.c ├── ffmpeg_receive.c ├── decoder.c ├── ffmpeg_streamer.c ├── transcoding.c ├── encoder.c ├── demuxer.c ├── filter.c ├── demuxing_decoding.c ├── decoding_encoding.c └── muxer.c /hehuoren.flv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lazybing/ffmpeg-study-recording/HEAD/hehuoren.flv -------------------------------------------------------------------------------- /screen_capture.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | int main(int argc, char **argv) 7 | { 8 | av_register_all(); 9 | avdevice_register_all(); 10 | 11 | AVFormatContext *pFormatCtx = avformat_alloc_context(); 12 | AVInputFormat *iformt = av_find_input_format("video4linux2"); 13 | avformat_open_input(&pFormatCtx, "video=x11grab", iformt, NULL); 14 | 15 | } 16 | -------------------------------------------------------------------------------- /metadata.c: -------------------------------------------------------------------------------- 1 | /* 2 | * @file 3 | * Show how the metadata API can be used in application programs 4 | * @example metadata.c 5 | */ 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | int main(int argc, char **argv) 12 | { 13 | AVFormatContext *fmt_ctx = NULL; 14 | AVDictionaryEntry *tag = NULL; 15 | int ret; 16 | 17 | if(argc != 2){ 18 | printf("usage:%s \n" 19 | "example program to demostrate the use of the libavformat metadata API.\n", argv[0]); 20 | return 1; 21 | } 22 | 23 | av_register_all(); 24 | if((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL))) 25 | return ret; 26 | 27 | printf("AVDictionary count %d\n", 28 | av_dict_count(fmt_ctx->metadata)); 29 | 30 | while((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) 31 | printf("%s=%s\n", tag->key, tag->value); 32 | 33 | avformat_close_input(&fmt_ctx); 34 | 35 | return 0; 36 | } 37 | 38 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ##complie tool 2 | CC = gcc 3 | 4 | DIR_INC = /usr/local/include 5 | DIR_LIB = /usr/local/lib 6 | LIBS = -lavdevice -lavfilter -lfreetype -lpostproc -lavformat -lavcodec -lswscale -lswresample -lavutil -lpthread -lz -lm -lva -lx264 -lx265 -lasound -lxcb -lX11 -lsndio -lXt -lGL -lGLU 7 | 8 | ## source file path 9 | SRC_PATH := . 10 | 11 | all: 12 | ## $(CC) -O0 -g demuxing_decoding.c -o demuxing_decoding -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 13 | ## $(CC) -O0 -g metadata.c -o metadata -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 14 | ## $(CC) -O0 -g container_parse.c -o container_parse -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 15 | ## $(CC) -O0 -g decoder.c -o decoder -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 16 | ## $(CC) -O0 -g encoder.c -o encoder -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 17 | ## $(CC) -O0 -g demuxer.c -o demuxer -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 18 | ## $(CC) -O0 -g muxer.c -o muxer -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 19 | ## $(CC) -O0 -g filter.c -o filter -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 20 | $(CC) -O0 -g scaling.c -o scaling -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 21 | ## $(CC) -O0 -g screen_capture.c -o screen_capture -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 22 | ## $(CC) -O0 -g ffmpeg_streamer.c -o ffmpeg_streamer -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 23 | ## $(CC) -O0 -g ffmpeg_receive.c -o ffmpeg_receive -L$(DIR_LIB) -I$(DIR_INC) $(LIBS) 24 | 25 | clean: 26 | rm demuxing_decoding metadata container_parse encoder decoder demuxer scaling 27 | 28 | .PHONY:clean 29 | 30 | -------------------------------------------------------------------------------- /cut_yuv_frame.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | static char *InputFileName = NULL; 6 | static char *OutputFileName = NULL; 7 | static FILE *fdInputFile = NULL; 8 | static FILE *fdOutputFile = NULL; 9 | static int frameWidth = 0; 10 | static int frameHeight = 0; 11 | static int startFrame = -1; 12 | static int endFrame = -1; 13 | 14 | void parse_argv(int argc, char **argv) 15 | { 16 | int i; 17 | 18 | for(i = 1; i < argc; i++){ 19 | printf("argv %d:%s %s\n", i, argv[i], argv[i+1]); 20 | if(!strcmp(argv[i], "-i")){ 21 | i++; 22 | InputFileName = argv[i]; 23 | } 24 | if(!strcmp(argv[i], "-o")){ 25 | i++; 26 | OutputFileName = argv[i]; 27 | } 28 | if(!strcmp(argv[i], "-w")){ 29 | i++; 30 | frameWidth = atoi(argv[i]); 31 | } 32 | 33 | if(!strcmp(argv[i], "-h")){ 34 | i++; 35 | frameHeight = atoi(argv[i]); 36 | } 37 | 38 | if(!strcmp(argv[i], "-s")){ 39 | i++; 40 | startFrame = atoi(argv[i]); 41 | } 42 | 43 | if(!strcmp(argv[i], "-e")){ 44 | i++; 45 | endFrame = atoi(argv[i]); 46 | } 47 | } 48 | } 49 | 50 | int main(int argc, char **argv) 51 | { 52 | int length; 53 | int framenum; 54 | long long i; 55 | char OutputFrame[20]; 56 | char *ptr = NULL; 57 | char NumFrame[100]; 58 | 59 | parse_argv(argc, argv); 60 | 61 | ptr = malloc(frameWidth); 62 | 63 | fdInputFile = fopen(InputFileName, "rb"); 64 | if(!fdInputFile){ 65 | fprintf(stderr, "Input file open fail\n"); 66 | return -1; 67 | } 68 | 69 | for(framenum = 0; framenum < startFrame; framenum++){ 70 | for(i = 0; i < frameHeight*3/2; i++){ 71 | fread(ptr, 1, frameWidth, fdInputFile); 72 | } 73 | } 74 | 75 | 76 | for(framenum = startFrame; framenum <= endFrame; framenum++){ 77 | strcpy(OutputFrame, "OutputFrame"); 78 | sprintf(NumFrame, "%d", framenum); 79 | strcat(OutputFrame, NumFrame); 80 | strcat(OutputFrame, ".yuv"); 81 | fdOutputFile = fopen(OutputFrame, "wb"); 82 | if(!fdOutputFile){ 83 | fprintf(stderr, "Output file open fail\n"); 84 | return -1; 85 | } 86 | 87 | for(i = 0; i < frameHeight*3/2; i++){ 88 | length = fread(ptr, 1, frameWidth, fdInputFile); 89 | if(length < 0){ 90 | printf("fread file fail\n"); 91 | return -1; 92 | } 93 | 94 | fwrite(ptr, 1, 1920, fdOutputFile); 95 | } 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /container_parse.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | #define TRUE 1 9 | #define FALSE 0 10 | 11 | int ff_parse(char *str) 12 | { 13 | int result; 14 | char filename[1024]; 15 | 16 | //Register all format an codecs 17 | av_register_all(); 18 | 19 | AVFormatContext *fmt_ctx = avformat_alloc_context(); 20 | 21 | result = avformat_open_input(&fmt_ctx, str, NULL, NULL); 22 | if(result < 0){ 23 | printf("Can't open file\n"); 24 | return result; 25 | } 26 | 27 | result = avformat_find_stream_info(fmt_ctx, NULL); 28 | if(result < 0){ 29 | printf("Can't get stream info\n"); 30 | return result; 31 | } 32 | 33 | printf("=================================\n"); 34 | printf("parse the stream info:\n"); 35 | printf("=================================\n"); 36 | printf("Container filename :%s\n", fmt_ctx->filename); 37 | printf("Container input format :%s\n", fmt_ctx->iformat->name); 38 | printf("Container nb_stream :%d\n", fmt_ctx->nb_streams); 39 | printf("Container duration :%llu\n", (long long unsigned int)fmt_ctx->duration); 40 | 41 | int video_stream_idx = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); 42 | if(video_stream_idx >= 0){ 43 | AVStream *video_stream = fmt_ctx->streams[video_stream_idx]; 44 | printf("=================================\n"); 45 | printf("parse Video info:\n"); 46 | printf("=================================\n"); 47 | printf("Video nb_frames :%lld\n", (long long int)video_stream->nb_frames); 48 | printf("Video codec_id :%d\n", video_stream->codec->codec_id); 49 | printf("video codec_name :%s\n", avcodec_get_name(video_stream->codec->codec_id)); 50 | printf("Video width x height :%d x %d\n", video_stream->codec->width, video_stream->codec->height); 51 | printf("Video pix_fmt :%d\n", video_stream->codec->pix_fmt); 52 | printf("Video bitrate :%lld kb/s\n", (long long int)video_stream->codec->bit_rate / 100); 53 | printf("Video avg_frame_rate :%d fps\n", video_stream->avg_frame_rate.num/video_stream->avg_frame_rate.den); 54 | } 55 | 56 | int audio_stream_idx = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); 57 | if(audio_stream_idx >= 0){ 58 | AVStream *audio_stream = fmt_ctx->streams[audio_stream_idx]; 59 | printf("=================================\n"); 60 | printf("parse Audio info:\n"); 61 | printf("=================================\n"); 62 | printf("Audio codec_id :%d\n", audio_stream->codec->codec_id); 63 | printf("Audio codec_name :%s\n", avcodec_get_name(audio_stream->codec->codec_id)); 64 | printf("Audio sample_rate :%d\n", audio_stream->codec->sample_rate); 65 | printf("Audio channels :%d\n", audio_stream->codec->channels); 66 | printf("Audio sample_fmt :%d\n", audio_stream->codec->sample_fmt); 67 | printf("Audio frame_size :%d\n", audio_stream->codec->frame_size); 68 | printf("Audio nb_frames :%lld\n", (long long int)audio_stream->nb_frames); 69 | printf("Audio bitrate :%lld\n", (long long int)audio_stream->codec->bit_rate / 100); 70 | } 71 | 72 | return TRUE; 73 | } 74 | 75 | int main(int argc, char **argv) 76 | { 77 | 78 | ff_parse(argv[1]); 79 | 80 | return TRUE; 81 | } 82 | 83 | -------------------------------------------------------------------------------- /scaling.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | static void fill_yuv_image(uint8_t *data[4], int linesize[4], 6 | int width, int height, int frame_index) 7 | { 8 | int x, y; 9 | 10 | //Y 11 | for(y = 0; y < height; y++) 12 | for(x = 0; x < width; x++) 13 | data[0][y*linesize[0] + x] = x + y + frame_index * 3; 14 | 15 | //Cb and Cr 16 | for(y = 0; y < height/2; y++){ 17 | for(x = 0; x < width/2; x++){ 18 | data[1][y*linesize[1] + x] = 128 + y + frame_index * 2; 19 | data[2][y*linesize[2] + x] = 64 + x + frame_index * 5; 20 | } 21 | } 22 | } 23 | 24 | int main(int argc, char **argv) 25 | { 26 | uint8_t *src_data[4], *dst_data[4]; 27 | int src_linesize[4], dst_linesize[4]; 28 | int src_w = 320, src_h = 240, dst_w, dst_h; 29 | enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_YUV420P; 30 | const char *dst_size = NULL; 31 | const char *dst_filename = NULL; 32 | FILE *dst_file; 33 | int dst_bufsize; 34 | struct SwsContext *sws_ctx; 35 | int i, ret; 36 | 37 | if(argc != 3){ 38 | fprintf(stderr, "Usage:%s output_file output_size\n" 39 | "API example program to show how to scale an image with libswscale.\n" 40 | "This program generate a series of pictures, rescales them to the given " 41 | "output_size and saves them to an output file named output_file\n",argv[0]); 42 | exit(1); 43 | } 44 | dst_filename = argv[1]; 45 | dst_size = argv[2]; 46 | 47 | if(av_parse_video_size(&dst_w, &dst_h, dst_size) < 0){ 48 | fprintf(stderr, "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",dst_size); 49 | exit(1); 50 | } 51 | 52 | dst_file = fopen(dst_filename, "wb"); 53 | if(!dst_file){ 54 | fprintf(stderr, "Could not open destination file %s\n", dst_filename); 55 | exit(1); 56 | } 57 | 58 | //create scaling context 59 | sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt, 60 | dst_w, dst_h, dst_pix_fmt, 61 | SWS_BILINEAR, NULL, NULL, NULL); 62 | if(!sws_ctx){ 63 | fprintf(stderr, 64 | "Impossible to create scale context for the conversion " 65 | "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", 66 | av_get_pix_fmt_name(src_pix_fmt), src_w, src_h, 67 | av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h); 68 | ret = AVERROR(EINVAL); 69 | goto end; 70 | } 71 | 72 | //allocate source and destination image buffers 73 | if((ret = av_image_alloc(src_data, src_linesize, 74 | src_w, src_h, src_pix_fmt, 16)) < 0){ 75 | fprintf(stderr, "Could not allocate source image\n"); 76 | goto end; 77 | } 78 | 79 | //buffer is going to be written to rawvideo file, on alignment 80 | if((ret = av_image_alloc(dst_data, dst_linesize, 81 | dst_w, dst_h, dst_pix_fmt, 1)) < 0){ 82 | fprintf(stderr, "Could not allocate destination image\n"); 83 | goto end; 84 | } 85 | dst_bufsize = ret; 86 | 87 | for(i = 0; i < 100; i++){ 88 | //generate synthetic video 89 | fill_yuv_image(src_data, src_linesize, src_w, src_h, i); 90 | 91 | //convert to destination format 92 | sws_scale(sws_ctx, (const uint8_t * const*)src_data, 93 | src_linesize, 0, src_h, dst_data, dst_linesize); 94 | 95 | //write scaled image to file 96 | fwrite(dst_data[0], 1, dst_bufsize, dst_file); 97 | } 98 | 99 | fprintf(stderr, "Scaling succeeded.Play the output file with the command:\n" 100 | "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", 101 | av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename); 102 | 103 | end: 104 | fclose(dst_file); 105 | av_freep(&src_data[0]); 106 | av_freep(&dst_data[0]); 107 | sws_freeContext(sws_ctx); 108 | return ret < 0; 109 | } 110 | -------------------------------------------------------------------------------- /ffmpeg_receive.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | //'1':Use H.264 Bitstream Filter 8 | #define USE_H264BSF 0 9 | 10 | int main(int argc, char **argv) 11 | { 12 | AVOutputFormat *ofmt = NULL; 13 | AVFormatContext *ifmt_ctx = NULL; 14 | AVFormatContext *ofmt_ctx = NULL; 15 | AVPacket pkt; 16 | const char *in_filename, *out_filename; 17 | int ret, i; 18 | int videoindex = -1; 19 | int frame_index = 0; 20 | in_filename = "rtmp://192.168.71.143/live/livestream"; 21 | out_filename = "receive.flv"; 22 | 23 | av_register_all(); 24 | avformat_network_init(); 25 | 26 | //input 27 | if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { 28 | printf( "Could not open input file."); 29 | goto end; 30 | } 31 | if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { 32 | printf( "Failed to retrieve input stream information"); 33 | goto end; 34 | } 35 | 36 | for(i=0; inb_streams; i++) 37 | if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){ 38 | videoindex=i; 39 | break; 40 | } 41 | 42 | av_dump_format(ifmt_ctx, 0, in_filename, 0); 43 | 44 | //Output 45 | avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); //RTMP 46 | 47 | if (!ofmt_ctx) { 48 | printf( "Could not create output context\n"); 49 | ret = AVERROR_UNKNOWN; 50 | goto end; 51 | } 52 | ofmt = ofmt_ctx->oformat; 53 | 54 | for(i = 0; i < ifmt_ctx->nb_streams; i++){ 55 | //Create output AVStream according to input AVStream 56 | AVStream *in_stream = ifmt_ctx->streams[1]; 57 | AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); 58 | if(!out_stream){ 59 | ret = AVERROR_UNKNOWN; 60 | goto end; 61 | } 62 | //Copy the setting of AVCodecContext 63 | ret = avcodec_copy_context(out_stream->codec, in_stream->codec); 64 | if (ret < 0) { 65 | printf( "Failed to copy context from input to output stream codec context\n"); 66 | goto end; 67 | } 68 | out_stream->codec->codec_tag = 0; 69 | if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) 70 | out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; 71 | } 72 | 73 | //Dump Format 74 | av_dump_format(ofmt_ctx, 0, out_filename, 1); 75 | //Open output URL 76 | if (!(ofmt->flags & AVFMT_NOFILE)) { 77 | ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); 78 | if (ret < 0) { 79 | printf( "Could not open output URL '%s'", out_filename); 80 | goto end; 81 | } 82 | } 83 | 84 | //Write file header 85 | ret = avformat_write_header(ofmt_ctx, NULL); 86 | if (ret < 0) { 87 | printf( "Error occurred when opening output URL\n"); 88 | goto end; 89 | } 90 | 91 | #if USE_H264BSF 92 | AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); 93 | #endif 94 | 95 | while(1){ 96 | AVStream *in_stream, *out_stream; 97 | //Get an AVPacket 98 | ret = av_read_frame(ifmt_ctx, &pkt); 99 | if(ret < 0) 100 | break; 101 | 102 | in_stream = ifmt_ctx->streams[pkt.stream_index]; 103 | out_stream = ofmt_ctx->streams[pkt.stream_index]; 104 | //copy packet 105 | //convert PTS/DTS 106 | /* 107 | pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 108 | pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 109 | */ 110 | pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); 111 | pkt.pos = -1; 112 | #if USE_H264BSF 113 | av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); 114 | #endif 115 | ret = av_interleaved_write_frame(ofmt_ctx, &pkt); 116 | if(ret < 0){ 117 | printf("Error muxing packet"); 118 | break; 119 | } 120 | av_free_packet(&pkt); 121 | } 122 | #if USE_H264BSF 123 | av_bitstream_filter_close(h264bsfc); 124 | #endif 125 | 126 | //write file trailer 127 | av_write_trailer(ofmt_ctx); 128 | 129 | end: 130 | avformat_close_input(&ifmt_ctx); 131 | //close output 132 | if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) 133 | avio_close(ofmt_ctx->pb); 134 | avformat_free_context(ofmt_ctx); 135 | if (ret < 0 && ret != AVERROR_EOF) { 136 | printf( "Error occurred.\n"); 137 | return -1; 138 | } 139 | return 0; 140 | } 141 | -------------------------------------------------------------------------------- /decoder.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define INBUF_SIZE 4096 7 | 8 | static FILE *pInput_File = NULL; 9 | static FILE *pOutput_File = NULL; 10 | 11 | static char *Input_FileName = NULL; 12 | static char *Output_FileName = NULL; 13 | 14 | static int decode_write_frame(const char *outfilename, AVCodecContext *avctx, 15 | AVFrame *frame, int *frame_count, AVPacket *pkt, int last) 16 | { 17 | int i; 18 | int idx; 19 | int color_idx; 20 | int len, got_frame; 21 | char buf[1024]; 22 | 23 | len = avcodec_decode_video2(avctx, frame, &got_frame, pkt); 24 | if(len < 0){ 25 | fprintf(stderr, "Error while decoding frame %d\n", *frame_count); 26 | return len; 27 | } 28 | 29 | printf("len %d got_frame %d\n",len, got_frame); 30 | 31 | if(got_frame){ 32 | printf("Saving %s frame %3d\n", last?"last":"", *frame_count); 33 | fflush(stdout); 34 | 35 | //the picture is allocated by the decoder, no need to free it 36 | (*frame_count)++; 37 | 38 | fwrite(frame->data[0], 1, frame->width*frame->height, pOutput_File); 39 | fwrite(frame->data[1], 1, (frame->width/2)*(frame->height/2), pOutput_File); 40 | fwrite(frame->data[2], 1, (frame->width/2)*(frame->height/2), pOutput_File); 41 | 42 | if(pkt->data){ 43 | pkt->size -= len; 44 | pkt->data += len; 45 | } 46 | } 47 | 48 | return 0; 49 | } 50 | 51 | int main(int argc, char **argv) 52 | { 53 | int ret = 0; 54 | int len = 0; 55 | int frame_count = 0; 56 | AVCodec *codec = NULL; 57 | AVCodecContext *codecCtx = NULL; 58 | AVCodecParserContext *pCodecParserCtx = NULL; 59 | AVFrame *frame; 60 | AVPacket pkt; 61 | uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; 62 | uint8_t *pDataPtr; 63 | size_t uDataSize; 64 | 65 | 66 | Input_FileName = argv[1]; 67 | Output_FileName = argv[2]; 68 | 69 | pInput_File = fopen(Input_FileName, "rb+"); 70 | if(!pInput_File){ 71 | fprintf(stderr, "Open input file fail\n"); 72 | exit(1); 73 | } 74 | 75 | pOutput_File = fopen(Output_FileName, "wb+"); 76 | if(!pOutput_File){ 77 | fprintf(stderr, "Open output file fail\n"); 78 | exit(1); 79 | } 80 | 81 | //set end of buffer to 0 82 | memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE); 83 | 84 | printf("Decode video file %s to %s\n", Input_FileName, Output_FileName); 85 | 86 | av_register_all(); 87 | 88 | av_init_packet(&pkt); 89 | 90 | codec = avcodec_find_decoder(AV_CODEC_ID_H264); 91 | if(!codec){ 92 | fprintf(stderr, "cannot find the decoder\n"); 93 | exit(1); 94 | } 95 | 96 | codecCtx = avcodec_alloc_context3(codec); 97 | if(!codecCtx){ 98 | fprintf(stderr, "could not allocate video codec context\n"); 99 | exit(1); 100 | } 101 | 102 | if(codec->capabilities & AV_CODEC_CAP_TRUNCATED){ 103 | codecCtx->flags |= AV_CODEC_FLAG_TRUNCATED; 104 | } 105 | 106 | pCodecParserCtx = av_parser_init(AV_CODEC_ID_H264); 107 | if(!pCodecParserCtx){ 108 | fprintf(stderr,"Error:alloc parser fail\n"); 109 | exit(1); 110 | } 111 | 112 | //open the decoder 113 | if(avcodec_open2(codecCtx, codec, NULL) < 0){ 114 | fprintf(stderr, "Could not open the decoder\n"); 115 | exit(1); 116 | } 117 | 118 | //open frame structure 119 | frame = av_frame_alloc(); 120 | if(!frame){ 121 | fprintf(stderr, "Could not allocate video frame\n"); 122 | exit(1); 123 | } 124 | 125 | frame_count = 0; 126 | for(;;){ 127 | uDataSize = fread(inbuf, 1, INBUF_SIZE, pInput_File); 128 | if(uDataSize == 0) 129 | break; 130 | 131 | pDataPtr = inbuf; 132 | while(uDataSize > 0){ 133 | //decode the data in the buffer to AVPacket, include a NAL unit data 134 | len = av_parser_parse2(pCodecParserCtx, codecCtx, &(pkt.data), &(pkt.size), 135 | pDataPtr, uDataSize, 136 | AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE); 137 | uDataSize -= len; 138 | pDataPtr += len; 139 | 140 | if(pkt.size == 0){ 141 | continue; 142 | } 143 | printf("Decode frame pts %d pkt.size %d\n", (int)pkt.pts, (int)pkt.size); 144 | 145 | if(decode_write_frame(Output_FileName, codecCtx, frame, &frame_count, &pkt, 0) < 0){ 146 | exit(1); 147 | } 148 | 149 | } 150 | } 151 | 152 | //decode the data in the decoder itself 153 | pkt.size = 0; 154 | pkt.data = NULL; 155 | decode_write_frame(Output_FileName, codecCtx, frame, &frame_count, &pkt, 0); 156 | 157 | fclose(pInput_File); 158 | fclose(pOutput_File); 159 | av_frame_free(&frame); 160 | 161 | return 0; 162 | } 163 | 164 | -------------------------------------------------------------------------------- /ffmpeg_streamer.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int main(int argc, char **argv) 10 | { 11 | AVOutputFormat *ofmt = NULL; 12 | AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL; 13 | AVPacket pkt; 14 | const char *in_filename, *out_filename; 15 | int ret, i; 16 | int videoindex = -1; 17 | int frame_index = 0; 18 | int64_t start_time = 0; 19 | in_filename = "cuc_ieschool.flv"; 20 | // out_filename = "rtmp://localhost/publishlive/livestream"; 21 | out_filename = "rtmp://192.168.71.143/live/livestream"; 22 | 23 | //register all codec and format 24 | av_register_all(); 25 | //network 26 | avformat_network_init(); 27 | //input 28 | if((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0){ 29 | printf("Could not open input file\n"); 30 | exit(1); 31 | } 32 | if((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0){ 33 | printf("Failed to retrieve input stream information\n"); 34 | exit(1); 35 | } 36 | 37 | for(i = 0; i < ifmt_ctx->nb_streams; i++){ 38 | if(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){ 39 | videoindex = i; 40 | break; 41 | } 42 | } 43 | 44 | av_dump_format(ifmt_ctx, 0, in_filename, 0); 45 | 46 | //Output 47 | avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); 48 | if(!ofmt_ctx){ 49 | printf("Could not create output context\n"); 50 | ret = AVERROR_UNKNOWN; 51 | exit(1); 52 | } 53 | ofmt = ofmt_ctx->oformat; 54 | 55 | for(i = 0; i < ifmt_ctx->nb_streams; i++){ 56 | //depends the input stream create output AVStream 57 | AVStream *in_stream = ifmt_ctx->streams[i]; 58 | AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); 59 | if(!out_stream){ 60 | printf("Faile allocate output stream\n"); 61 | ret = AVERROR_UNKNOWN; 62 | exit(1); 63 | } 64 | 65 | //copy the setting of AVCodecContext 66 | ret = avcodec_copy_context(out_stream->codec, in_stream->codec); 67 | if(ret < 0){ 68 | printf("Failed to copy context from input to output stream codec context\n"); 69 | exit(1); 70 | } 71 | out_stream->codec->codec_tag = 0; 72 | if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) 73 | out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; 74 | } 75 | 76 | //Dump output Format 77 | av_dump_format(ofmt_ctx, 0, out_filename, 1); 78 | 79 | //open output URL 80 | if(!(ofmt->flags & AVFMT_NOFILE)){ 81 | ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); 82 | if(ret < 0){ 83 | printf("Could not open output URL '%s'\n", out_filename); 84 | exit(1); 85 | } 86 | } 87 | 88 | //Write file header 89 | ret = avformat_write_header(ofmt_ctx, NULL); 90 | if(ret < 0){ 91 | printf("Error occurred when opening output URL\n"); 92 | exit(1); 93 | } 94 | 95 | start_time = av_gettime(); 96 | while(1){ 97 | AVStream *in_stream, *out_stream; 98 | //get an AVPacket 99 | ret = av_read_frame(ifmt_ctx, &pkt); 100 | if(ret < 0) 101 | break; 102 | //Delay 103 | if(pkt.stream_index == videoindex){ 104 | AVRational time_base = ifmt_ctx->streams[videoindex]->time_base; 105 | AVRational time_base_q = {1, AV_TIME_BASE}; 106 | int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q); 107 | int64_t now_time = av_gettime() - start_time; 108 | if(pts_time > now_time) 109 | av_usleep(pts_time - now_time); 110 | } 111 | 112 | in_stream = ifmt_ctx->streams[pkt.stream_index]; 113 | out_stream = ofmt_ctx->streams[pkt.stream_index]; 114 | 115 | //convert PTS/DTS 116 | pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 117 | pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 118 | pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); 119 | pkt.pos = -1; 120 | 121 | //print to screen 122 | if(pkt.stream_index == videoindex){ 123 | printf("send %8d video frames ot output URL\n", frame_index); 124 | frame_index++; 125 | } 126 | 127 | ret = av_interleaved_write_frame(ofmt_ctx, &pkt); 128 | if(ret < 0){ 129 | printf("Error muxing packet\n"); 130 | break; 131 | 132 | av_free_packet(&pkt); 133 | } 134 | } 135 | 136 | av_write_trailer(ofmt_ctx); 137 | 138 | end: 139 | avformat_close_input(&ifmt_ctx); 140 | //close output 141 | if(ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) 142 | avio_close(ofmt_ctx->pb); 143 | avformat_free_context(ofmt_ctx); 144 | if(ret < 0 && ret != AVERROR_EOF){ 145 | printf("Error occurred.\n"); 146 | return -1; 147 | } 148 | 149 | return 0; 150 | } 151 | 152 | -------------------------------------------------------------------------------- /transcoding.c: -------------------------------------------------------------------------------- 1 | /* 2 | * @file 3 | * API example for demuxing, decoding, filtering, encoding and muxing 4 | * @example transcoding.c 5 | */ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | static AVFormatContext *ifmt_ctx; 17 | static AVFormatContext *ofmt_ctx; 18 | typedef struct FilteringContext{ 19 | AVFilterContext *buffersink_ctx; 20 | AVFilterContext *buffersrc_ctx; 21 | AVFilterGraph *filter_graph; 22 | }FilteringContext; 23 | static FilteringContext *filter_ctx; 24 | 25 | static int open_input_file(const char *filename) 26 | { 27 | 28 | } 29 | 30 | static int open_output_file(const char *filename) 31 | { 32 | 33 | } 34 | 35 | static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx, 36 | AVCodecContext *enc_ctx, const char *filter_spec) 37 | { 38 | 39 | } 40 | 41 | static int init_filters(void) 42 | { 43 | 44 | } 45 | 46 | static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) 47 | { 48 | 49 | } 50 | 51 | static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index) 52 | { 53 | 54 | } 55 | 56 | static int flush_encoder(unsigned int stream_index) 57 | { 58 | 59 | } 60 | 61 | int main(int argc, char **argv) 62 | { 63 | int ret; 64 | AVPacket packet = {.data = NULL, .size = 0}; 65 | AVFrame *frame = NULL; 66 | enum AVMediaType type; 67 | unsigned int stream_index; 68 | unsigned int i; 69 | int got_frame; 70 | int (*dec_fun)(AVCodecContext *, AVFrame *, int *, const AVPacket *); 71 | 72 | if(argc != 3){ 73 | av_log(NULL, AV_LOG_ERROR, "Usage:%s \n", argv[0]); 74 | return 1; 75 | } 76 | 77 | av_register_all(); 78 | avfilter_register_all(); 79 | 80 | if((ret = open_input_file(argv[1])) < 0) 81 | goto end; 82 | if((ret = open_output_file(argv[2])) < 0) 83 | goto end; 84 | if((ret = init_filters()) < 0) 85 | goto end; 86 | 87 | //read all packets 88 | while(1){ 89 | if((ret = av_read_frame(ifmt_ctx, &packet)) < 0) 90 | break; 91 | stream_index = packet.stream_index; 92 | type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type; 93 | av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", 94 | stream_index); 95 | 96 | if(filter_ctx[stream_index].filter_graph){ 97 | av_log(NULL, AV_LOG_DEBUG, "Going to reencode & filter the frame\n"); 98 | frame = av_frame_alloc(); 99 | if(!frame){ 100 | ret = AVERROR(ENUMEM); 101 | break; 102 | } 103 | av_packet_rescale_ts(&packet, 104 | ifmt_ctx->streams[stream_index]->time_base, 105 | ifmt_ctx->streams[stream_index]->codec->time_base); 106 | dec_fun = (type == AVMEDIA_TYPE_VIDEO)?avcodec_decode_video2:avcodec_decode_audio4; 107 | ret = dec_fun(ifmt_ctx->streams[stream_index]->codec, frame, 108 | &got_frame, &packet); 109 | 110 | if(ret < 0){ 111 | av_frame_free(&frame); 112 | av_log(NULL, AV_LOG_ERROR, "Decoding failed\n"); 113 | break; 114 | } 115 | 116 | if(got_frame){ 117 | frame->pts = av_frame_get_best_effort_timestamp(frame); 118 | ret = filter_encode_write_frame(frame, stream_index); 119 | av_frame_free(&frame); 120 | if(ret < 0) 121 | goto end; 122 | }else{ 123 | av_frame_free(&frame); 124 | } 125 | }else{ 126 | //remux this frame without reencoding 127 | av_packet_rescale_ts(&packet, 128 | ifmt_ctx->streams[stream_index]->time_base, 129 | ofmt_ctx->streams[stream_index]->time_base); 130 | ret = av_interleaved_write_frame(ofmt_ctx, &packet); 131 | if(ret < 0) 132 | goto end; 133 | } 134 | av_free_packet(&packet); 135 | } 136 | 137 | //flush filters and encoders 138 | for(i = 0; i < ifmt_ctx->nb_stream; i++){ 139 | //flush filter 140 | if(!filter_ctx[i].filter_graph) 141 | continue; 142 | ret = filter_encode_write_frame(NULL, i); 143 | if(ret < 0){ 144 | av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n"); 145 | goto end; 146 | } 147 | 148 | //flush encoder 149 | ret = flush_encoder(i); 150 | if(ret < 0){ 151 | av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n"); 152 | goto end; 153 | } 154 | } 155 | av_write_trailer(ofmt_ctx); 156 | 157 | end: 158 | av_free_packet(&packet); 159 | av_frame_free(&frame); 160 | for(i = 0; i < ifmt_ctx->nb_streams;i++){ 161 | avcodec_close(ifmt_ctx->streams[i]->codec); 162 | if(ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec) 163 | avcodec_close(ifmt_ctx->streams[i]->codec); 164 | if(filter_ctx && filter_ctx[i].filter_graph) 165 | avfilter_graph_free(&filter_ctx[i].filter_graph); 166 | } 167 | av_free(filter_ctx); 168 | avformat_close_input(&ifmt_ctx); 169 | if(ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) 170 | avio_closep(&ofmt_ctx->pb); 171 | avformat_free_context(ofmt_ctx); 172 | 173 | if(ret , 0) 174 | av_log(NULL, AV_LOG_ERROR, "Error occurred:%s\n", av_err2str(ret)); 175 | 176 | return ret ? 1 : 0; 177 | } 178 | -------------------------------------------------------------------------------- /encoder.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | static char *InputFileName = NULL; 9 | static char *OutputFileName = NULL; 10 | static FILE *pInput_File = NULL; 11 | static FILE *pOutput_File = NULL; 12 | 13 | static int frameWidth = 0; 14 | static int frameHeight = 0; 15 | static int bitRate = 0; 16 | static int frameToEncode = 0; 17 | 18 | static enum AVCodecID codec_id = AV_CODEC_ID_NONE; 19 | static char *strcodec = NULL; 20 | 21 | void parse_argv(int argc, char **argv) 22 | { 23 | int i; 24 | 25 | for(i = 1; i < argc; i++){ 26 | printf("argv %d : %s\n", i, argv[i]); 27 | if(!strcmp(argv[i], "-i")) InputFileName = argv[++i]; 28 | if(!strcmp(argv[i], "-o")) OutputFileName = argv[++i]; 29 | if(!strcmp(argv[i], "-codec")){ 30 | i++; 31 | if(!strcmp(argv[i], "264")) { codec_id = AV_CODEC_ID_H264; strcodec = "264";} 32 | if(!strcmp(argv[i], "265")) { codec_id = AV_CODEC_ID_H265; strcodec = "265";} 33 | if(!strcmp(argv[i], "mpeg1")) { codec_id = AV_CODEC_ID_MPEG1VIDEO; strcodec = "mpeg1";} 34 | if(!strcmp(argv[i], "mpeg2")) { codec_id = AV_CODEC_ID_MPEG1VIDEO; strcodec = "mpeg2";} 35 | } 36 | if(!strcmp(argv[i], "-w")) frameWidth = atoi(argv[++i]); 37 | if(!strcmp(argv[i], "-h")) frameHeight = atoi(argv[++i]); 38 | } 39 | 40 | 41 | pInput_File = fopen(InputFileName, "rb+"); 42 | if(!pInput_File){ 43 | fprintf(stderr, "open input file fail\n"); 44 | return; 45 | } 46 | 47 | pOutput_File = fopen(OutputFileName, "wb+"); 48 | if(!pOutput_File){ 49 | fprintf(stderr, "open output file fail\n"); 50 | return; 51 | } 52 | 53 | printf("input %s output %s codec %s\n", InputFileName, OutputFileName, strcodec); 54 | 55 | return; 56 | } 57 | 58 | int main(int argc, char **argv) 59 | { 60 | int ret = 0; 61 | int i, x, y; 62 | int got_output; 63 | AVCodec *codec = NULL; 64 | AVCodecContext *codecCtx = NULL; 65 | AVFrame *frame = NULL; 66 | AVPacket pkt; 67 | 68 | if(argc < 5){ 69 | fprintf(stderr, "Usage:%s -i -o [-codec (264|265|mpeg1|mpeg2)]\n", argv[0]); 70 | exit(1); 71 | } 72 | 73 | //parse argument 74 | parse_argv(argc, argv); 75 | 76 | av_register_all(); 77 | avcodec_register_all(); 78 | 79 | codec = avcodec_find_encoder(codec_id); 80 | if(!codec){ 81 | fprintf(stderr, "Could not find the encoder\n"); 82 | return -1; 83 | } 84 | 85 | codecCtx = avcodec_alloc_context3(codec); 86 | if(!codecCtx){ 87 | fprintf(stderr, "Could not allocate video codec context\n"); 88 | return -1; 89 | } 90 | 91 | codecCtx->bit_rate = 400000; 92 | codecCtx->width = frameWidth; 93 | codecCtx->height = frameHeight; 94 | codecCtx->time_base = (AVRational){1, 25}; 95 | codecCtx->gop_size = 10; 96 | codecCtx->max_b_frames = 1; 97 | codecCtx->pix_fmt = AV_PIX_FMT_YUV420P; 98 | 99 | av_opt_set(codecCtx->priv_data, "preset", "slow", 0); 100 | 101 | //open the encoder 102 | if(avcodec_open2(codecCtx, codec, NULL) < 0){ 103 | fprintf(stderr, "Open encoder fail\n"); 104 | return -1; 105 | } 106 | 107 | //allocate AVFrame structure 108 | frame = av_frame_alloc(); 109 | if(!frame){ 110 | fprintf(stderr, "Could not allocate wideo frame\n"); 111 | return -1; 112 | } 113 | frame->format = codecCtx->pix_fmt; 114 | frame->width = codecCtx->width; 115 | frame->height = codecCtx->height; 116 | 117 | //allocate AVFrame data 118 | ret = av_image_alloc(frame->data, frame->linesize, codecCtx->width, codecCtx->height, 119 | codecCtx->pix_fmt, 32); 120 | if(ret < 0){ 121 | fprintf(stderr, "Could not allocate raw picture buffer\n"); 122 | return -1; 123 | } 124 | 125 | 126 | for(i = 0; i < 98; i++){ 127 | //init AVPacket 128 | av_init_packet(&pkt); 129 | pkt.data = NULL; 130 | pkt.size = 0; 131 | 132 | fflush(stdout); 133 | //prepare a dummy image 134 | /*Y*/ 135 | /* 136 | for(y = 0; y < codecCtx->height; y++){ 137 | for(x = 0; x < codecCtx->width; x++){ 138 | frame->data[0][y*frame->linesize[0] + x] = x + y + i*3; 139 | } 140 | } 141 | */ 142 | 143 | /*Cb and Cr*/ 144 | /* 145 | for(y = 0; y < codecCtx->height/2; y++){ 146 | for(x = 0; x < codecCtx->width/2; x++){ 147 | frame->data[1][y*frame->linesize[1] + x] = 128 + y + i*2; 148 | frame->data[2][y*frame->linesize[2] + x] = 64 + x + i*5; 149 | } 150 | } 151 | */ 152 | fread(frame->data[0], 1, codecCtx->height * codecCtx->width, pInput_File); 153 | fread(frame->data[1], 1, (codecCtx->height/2) * (codecCtx->width/2), pInput_File); 154 | fread(frame->data[2], 1, (codecCtx->height/2) * (codecCtx->width/2), pInput_File); 155 | 156 | 157 | frame->pts = i; 158 | 159 | //encode the image 160 | ret = avcodec_encode_video2(codecCtx, &pkt, frame, &got_output); 161 | if(ret < 0){ 162 | fprintf(stderr, "Error encoding frame\n"); 163 | return -1; 164 | } 165 | 166 | if(got_output){ 167 | printf("Write frame %3d (size=%5d) \n", i, pkt.size); 168 | fwrite(pkt.data, 1, pkt.size, pOutput_File); 169 | av_free_packet(&pkt); 170 | } 171 | } 172 | 173 | //get the delayed frames 174 | for(got_output = 1; got_output;){ 175 | fflush(stdout); 176 | 177 | ret = avcodec_encode_video2(codecCtx, &pkt, NULL, &got_output); 178 | if(ret < 0){ 179 | fprintf(stderr, "Error encoding frame\n"); 180 | return -1; 181 | } 182 | 183 | if(got_output){ 184 | printf("Write frame %3d (size=%5d) \n", i, pkt.size); 185 | fwrite(pkt.data, 1, pkt.size, pOutput_File); 186 | av_free_packet(&pkt); 187 | } 188 | } 189 | 190 | return 0; 191 | } 192 | -------------------------------------------------------------------------------- /demuxer.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | static int num; 7 | 8 | static AVFormatContext *fmt_ctx = NULL; 9 | static AVCodecContext *video_codec_ctx = NULL, *audio_codec_ctx; 10 | static AVStream *video_stream = NULL, *audio_stream = NULL; 11 | 12 | static const char *src_filename = NULL; 13 | static const char *video_dst_filename = NULL; 14 | static const char *audio_dst_filename = NULL; 15 | static FILE *video_dst_file = NULL; 16 | static FILE *audio_dst_file = NULL; 17 | 18 | static int width, height; 19 | static enum AVPixelFormat pix_fmt; 20 | static uint8_t *video_dst_data[4] = {NULL}; 21 | static int video_dst_linesize[4]; 22 | static int video_dst_bufsize; 23 | 24 | static int video_stream_idx = -1, audio_stream_idx = -1; 25 | 26 | static AVFrame *frame = NULL; 27 | static AVPacket pkt; 28 | static int video_frame_count = 0; 29 | static int audio_frame_count = 0; 30 | 31 | static int decode_packet(int *got_frame, int cached) 32 | { 33 | int ret = 0; 34 | int decoded = pkt.size; 35 | *got_frame = 0; 36 | 37 | if(pkt.stream_index == video_stream_idx){ 38 | //decode video frame 39 | ret = avcodec_decode_video2(video_codec_ctx, frame, got_frame, &pkt); 40 | if(ret < 0){ 41 | fprintf(stderr, "Error decoding video frame (%s) \n", 42 | av_err2str(ret)); 43 | return ret; 44 | } 45 | 46 | printf("num %d got_frame %d\n", num++, *got_frame); 47 | if(*got_frame){ 48 | av_image_copy(video_dst_data, video_dst_linesize, 49 | (const uint8_t **)(frame->data), frame->linesize, 50 | pix_fmt, width, height); 51 | 52 | //write to raw video file 53 | fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file); 54 | } 55 | }else if(pkt.stream_index == video_stream_idx){ 56 | //decode audio frame 57 | ret = avcodec_decode_audio4(audio_codec_ctx, frame, got_frame, &pkt); 58 | if(ret < 0){ 59 | fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret)); 60 | return ret; 61 | } 62 | 63 | if(*got_frame){ 64 | size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format); 65 | fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file); 66 | } 67 | } 68 | 69 | return FFMIN(ret, pkt.size); 70 | } 71 | 72 | static int open_codec_context(int *stream_idx, 73 | AVFormatContext *fmt_ctx, 74 | enum AVMediaType type) 75 | { 76 | int ret, stream_index; 77 | AVStream *pStream; 78 | AVCodecContext *codec_ctx = NULL; 79 | AVCodec *codec; 80 | 81 | ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0); 82 | if(ret < 0){ 83 | fprintf(stderr, "Could not find %s stream in input file '%s'\n", 84 | av_get_media_type_string(type), src_filename); 85 | }else{ 86 | stream_index = ret; 87 | pStream = fmt_ctx->streams[stream_index]; 88 | 89 | //find decoder for the stream 90 | codec_ctx = pStream->codec; 91 | codec = avcodec_find_decoder(codec_ctx->codec_id); 92 | if(!codec){ 93 | fprintf(stderr, "Failed to find %s codec\n", 94 | av_get_media_type_string(type)); 95 | return AVERROR(EINVAL); 96 | } 97 | 98 | //open the decoder 99 | if((ret = avcodec_open2(codec_ctx, codec, NULL))< 0){ 100 | fprintf(stderr, "Failed to open %s codec\n", 101 | av_get_media_type_string(type)); 102 | return ret; 103 | } 104 | } 105 | *stream_idx = stream_index; 106 | } 107 | 108 | int main(int argc, char **argv) 109 | { 110 | int ret; 111 | 112 | src_filename = argv[1]; 113 | video_dst_filename = argv[2]; 114 | audio_dst_filename = argv[3]; 115 | 116 | video_dst_file = fopen(video_dst_filename, "wb+"); 117 | if(!video_dst_file){ 118 | fprintf(stderr, "Coulde not open video dst file\n"); 119 | exit(1); 120 | } 121 | 122 | audio_dst_file = fopen(audio_dst_filename, "wb+"); 123 | if(!audio_dst_file){ 124 | fprintf(stderr, "Coulde not open audio dst file\n"); 125 | exit(1); 126 | } 127 | 128 | av_register_all(); 129 | 130 | //open input file, and allocate format context 131 | if(avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0){ 132 | fprintf(stderr, "Could not open source file %s\n", src_filename); 133 | exit(1); 134 | } 135 | 136 | //retrive stream information 137 | if(avformat_find_stream_info(fmt_ctx, NULL) < 0){ 138 | fprintf(stderr, "Could not find stream information\n"); 139 | exit(1); 140 | } 141 | 142 | if(open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0){ 143 | video_stream = fmt_ctx->streams[video_stream_idx]; 144 | video_codec_ctx = video_stream->codec; 145 | 146 | //allocate image where the decoded image will be put 147 | width = video_codec_ctx->width; 148 | height = video_codec_ctx->height; 149 | pix_fmt = video_codec_ctx->pix_fmt; 150 | ret = av_image_alloc(video_dst_data, video_dst_linesize, 151 | width, height, pix_fmt, 1); 152 | if(ret < 0){ 153 | fprintf(stderr, "Could not allocate raw video buffer\n"); 154 | exit(1); 155 | } 156 | video_dst_bufsize = ret; 157 | } 158 | 159 | if(open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0){ 160 | audio_stream = fmt_ctx->streams[audio_stream_idx]; 161 | audio_codec_ctx = audio_stream->codec; 162 | } 163 | 164 | //dump input information to stderr 165 | av_dump_format(fmt_ctx, 0, src_filename, 0); 166 | 167 | //allocate frame 168 | frame = av_frame_alloc(); 169 | if(!frame){ 170 | fprintf(stderr, "Could not allocate frame\n"); 171 | exit(1); 172 | } 173 | 174 | av_init_packet(&pkt); 175 | pkt.data = NULL; 176 | pkt.size = 0; 177 | 178 | //read frames from the file 179 | int got_frame; 180 | while(av_read_frame(fmt_ctx, &pkt) >= 0){ 181 | AVPacket orig_pkt = pkt; 182 | 183 | do{ 184 | ret = decode_packet(&got_frame, 0); 185 | if(ret < 0) 186 | break; 187 | pkt.data += ret; 188 | pkt.size -= ret; 189 | }while(pkt.size > 0); 190 | av_free_packet(&orig_pkt); 191 | } 192 | 193 | avcodec_close(video_codec_ctx); 194 | avcodec_close(audio_codec_ctx); 195 | avformat_close_input(&fmt_ctx); 196 | if(video_dst_file){ 197 | fclose(video_dst_file); 198 | } 199 | 200 | if(audio_dst_file){ 201 | fclose(audio_dst_file); 202 | } 203 | 204 | av_frame_free(&frame); 205 | av_free(video_dst_data[0]); 206 | 207 | return ret < 0; 208 | } 209 | -------------------------------------------------------------------------------- /filter.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | static char *pInput_File_Name = NULL; 12 | static char *pOutput_File_Name = NULL; 13 | 14 | static FILE *pfdInput = NULL; 15 | static FILE *pfdOutput = NULL; 16 | 17 | static int width; 18 | static int height; 19 | 20 | static AVFilterContext *buffersink_ctx; 21 | static AVFilterContext *buffersrc_ctx; 22 | static AVFilterGraph *filter_graph; 23 | 24 | const char *filter_descr = "scale=78:24,transpose=cclock"; 25 | //const char *filter_descr = "drawtext=fontfile=arial.ttf:fontcolor=green:fontsize=30:text='FFMpeg Filter Demo'"; 26 | 27 | void Parse_Args(int argc, char **argv) 28 | { 29 | pInput_File_Name = argv[1]; 30 | pOutput_File_Name = argv[2]; 31 | 32 | width = atoi(argv[3]); 33 | height = atoi(argv[4]); 34 | 35 | pfdInput = fopen(pInput_File_Name, "rb+"); 36 | if(!pfdInput){ 37 | fprintf(stderr, "open input file fail\n"); 38 | exit(1); 39 | } 40 | pfdOutput = fopen(pOutput_File_Name, "wb+"); 41 | if(!pfdOutput){ 42 | fprintf(stderr, "open output file fail\n"); 43 | exit(1); 44 | } 45 | } 46 | 47 | static int init_filters(const char *filter_descr) 48 | { 49 | int ret; 50 | char args[512]; 51 | AVFilter *buffersrc = avfilter_get_by_name("buffer"); 52 | AVFilter *buffersink = avfilter_get_by_name("buffersink"); 53 | AVFilterInOut *outputs = avfilter_inout_alloc(); 54 | AVFilterInOut *inputs = avfilter_inout_alloc(); 55 | enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE}; 56 | AVBufferSinkParams *buffersink_params; 57 | 58 | filter_graph = avfilter_graph_alloc(); 59 | if(!outputs || !inputs || !filter_graph){ 60 | ret = AVERROR(ENOMEM); 61 | goto end; 62 | } 63 | 64 | buffersink_params = av_buffersink_params_alloc(); 65 | buffersink_params->pixel_fmts = pix_fmts; 66 | 67 | snprintf(args, sizeof(args), 68 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 69 | width, height,AV_PIX_FMT_YUV420P,1,25,1,1); 70 | 71 | //buffer video source:the decoded frames from the decoder will be inserted here 72 | ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", 73 | args, NULL, filter_graph); 74 | if(ret < 0){ 75 | av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); 76 | goto end; 77 | } 78 | 79 | //buffer video sink:to terminate the filter chain 80 | ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "in", 81 | NULL, NULL, filter_graph); 82 | if(ret < 0){ 83 | av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); 84 | goto end; 85 | } 86 | 87 | outputs->name = av_strdup("in"); 88 | outputs->filter_ctx = buffersrc_ctx; 89 | outputs->pad_idx = 0; 90 | outputs->next = NULL; 91 | 92 | inputs->name = av_strdup("out"); 93 | inputs->filter_ctx = buffersink_ctx; 94 | inputs->pad_idx = 0; 95 | inputs->next = NULL; 96 | 97 | if((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr, 98 | &inputs, &outputs, NULL)) < 0) 99 | goto end; 100 | if((ret = avfilter_graph_config(filter_graph, NULL)) < 0) 101 | goto end; 102 | 103 | end: 104 | avfilter_inout_free(&inputs); 105 | avfilter_inout_free(&outputs); 106 | 107 | return ret; 108 | } 109 | 110 | void init_fram_in_out(AVFrame **framein, AVFrame **frameout, 111 | unsigned char **frame_buffer_in, unsigned char **frame_buffer_out, 112 | int framewidth, int frameheight) 113 | { 114 | *framein = av_frame_alloc(); 115 | *frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, framewidth, frameheight, 1)); 116 | av_image_fill_arrays((*framein)->data, (*framein)->linesize, *frame_buffer_in, AV_PIX_FMT_YUV420P, framewidth, frameheight, 1); 117 | 118 | *frameout = av_frame_alloc(); 119 | *frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, framewidth, frameheight, 1)); 120 | av_image_fill_arrays((*frameout)->data, (*frameout)->linesize, *frame_buffer_out, AV_PIX_FMT_YUV420P, framewidth, frameheight, 1); 121 | 122 | (*framein)->width = framewidth; 123 | (*framein)->height = frameheight; 124 | (*framein)->format = AV_PIX_FMT_YUV420P; 125 | } 126 | 127 | int read_yuv_data_to_buf(unsigned char *frame_buffer_in, FILE *pfdInput, AVFrame **frameIn) 128 | { 129 | AVFrame *pFrameIn = *frameIn; 130 | int framesize = width * height * 3 / 2; 131 | 132 | if(fread(frame_buffer_in, 1, framesize, pfdInput) != framesize) 133 | { return 0;} 134 | 135 | pFrameIn->data[0] = frame_buffer_in; 136 | pFrameIn->data[1] = pFrameIn->data[0] + width * height; 137 | pFrameIn->data[2] = pFrameIn->data[1] + width * height / 4; 138 | 139 | return 1; 140 | } 141 | 142 | int add_frame_to_filter(AVFrame *frameIn) 143 | { 144 | if(av_buffersrc_add_frame(buffersrc_ctx, frameIn) < 0) 145 | return 0; 146 | return 1; 147 | } 148 | 149 | int get_frame_from_filter(AVFrame **frameout) 150 | { 151 | if(av_buffersink_get_frame(buffersink_ctx, *frameout) < 0) 152 | return 0; 153 | return 1; 154 | } 155 | 156 | void write_yuv_to_outfile(const AVFrame *frame_out, FILE *pfdOutput) 157 | { 158 | if(frame_out->format == AV_PIX_FMT_YUV420P) 159 | { 160 | fwrite(frame_out->data[0], 1, frame_out->height * frame_out->width * 3 / 2, pfdOutput); 161 | /* 162 | for(int i=0;iheight;i++) { 163 | fwrite(frame_out->data[0]+frame_out->linesize[0]*i,1,frame_out->width,pfdOutput); 164 | } 165 | for(int i=0;iheight/2;i++) { 166 | fwrite(frame_out->data[1]+frame_out->linesize[1]*i,1,frame_out->width/2,pfdOutput); 167 | } 168 | for(int i=0;iheight/2;i++) { 169 | fwrite(frame_out->data[2]+frame_out->linesize[2]*i,1,frame_out->width/2,pfdOutput); 170 | } 171 | */ 172 | } 173 | } 174 | 175 | int main(int argc, char **argv) 176 | { 177 | int ret; 178 | AVFrame *frame_in = NULL; 179 | AVFrame *frame_out = NULL; 180 | unsigned char *frame_buffer_in = NULL; 181 | unsigned char *frame_buffer_out = NULL; 182 | 183 | //get the input arguments 184 | //(input and output file and width and height) 185 | Parse_Args(argc, argv); 186 | 187 | av_register_all(); 188 | avfilter_register_all(); 189 | 190 | if((ret = init_filters(filter_descr)) != 0){ 191 | printf("init filters fail\n"); 192 | return ret; 193 | } 194 | 195 | init_fram_in_out(&frame_in, &frame_out, 196 | &frame_buffer_in, &frame_buffer_out, 197 | width, height); 198 | 199 | while(read_yuv_data_to_buf(frame_buffer_in, pfdInput, &frame_in)) 200 | { 201 | //put the frame to filter graph 202 | if(!add_frame_to_filter(frame_in)) 203 | { 204 | printf("Error while adding frame\n"); 205 | exit(1); 206 | } 207 | 208 | //get the frame from the filter graph 209 | if(!get_frame_from_filter(&frame_out)) 210 | { 211 | printf("Error while getting frame\n"); 212 | exit(1); 213 | } 214 | 215 | write_yuv_to_outfile(frame_out, pfdOutput); 216 | 217 | printf("Process 1 frame\n"); 218 | av_frame_unref(frame_out); 219 | } 220 | 221 | return 0; 222 | } 223 | -------------------------------------------------------------------------------- /demuxing_decoding.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Demuxing and decoding example. 3 | * 4 | * Show how to use libavformat and libavcodec API to demux 5 | * and decode audio and video data. 6 | * 7 | * */ 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | static AVFormatContext *fmt_ctx = NULL; 15 | static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx = NULL; 16 | static int width, height; 17 | static enum AVPixelFormat pix_fmt; 18 | static AVStream *video_stream = NULL, *audio_stream = NULL; 19 | static const char *src_filename = NULL; 20 | static const char *video_dst_filename = NULL; 21 | static const char *audio_dst_filename = NULL; 22 | static FILE *video_dst_file = NULL; 23 | static FILE *audio_dst_file = NULL; 24 | 25 | static uint8_t *video_dst_data[4] = {NULL}; 26 | static int video_dst_linesize[4]; 27 | static int video_dst_bufsize; 28 | 29 | static int video_stream_idx = -1, audio_stream_idx = -1; 30 | 31 | static AVFrame *frame = NULL; 32 | static AVPacket pkt; 33 | static int video_frame_count = 0; 34 | static int audio_frame_count = 0; 35 | 36 | enum{ 37 | API_MODE_OLD = 0, //old method, deprecated 38 | API_MODE_NEW_API_REF_COUNT = 1, 39 | API_MODE_NEW_API_NO_REF_COUNT = 2, 40 | }; 41 | static int api_mode = API_MODE_OLD; 42 | 43 | static int decode_packet(int *got_frame, int cached) 44 | { 45 | int ret = 0; 46 | int decoded = pkt.size; 47 | 48 | *got_frame = 0; 49 | 50 | if(pkt.stream_index == video_stream_idx){ 51 | //decode video frame 52 | ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt); 53 | if(ret < 0){ 54 | fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret)); 55 | return ret; 56 | } 57 | 58 | if(*got_frame){ 59 | if(frame->width != width || frame->height != height || 60 | frame->format != pix_fmt){ 61 | //To handle this change, one could call av_image_alloc again and 62 | //decode the following frames into another rawvideo file 63 | fprintf(stderr, "Error:Width, height and pixel format have to be" 64 | "constant in a rawvideo file, but the width, height, or" 65 | "pixel format of the input video changed:\n" 66 | "old:width = %d, height = %d, format = %s\n" 67 | "new:width = %d, height = %d, format = %s\n", 68 | width, height, av_get_pix_fmt_name(pix_fmt), 69 | frame->width, frame->height, 70 | av_get_pix_fmt_name(frame->format)); 71 | return -1; 72 | } 73 | } 74 | 75 | printf("video_frame %s n:%d coded_n:%d pts:%s\n", 76 | cached?"(cached)":"", 77 | video_frame_count++, frame->coded_picture_number, 78 | av_ts2timestr(frame->pts, &video_dec_ctx->time_base)); 79 | 80 | //copy decoded frame to destination buffer: 81 | //this is required since rawvideo expects non aligned data 82 | av_image_copy(video_dst_data, video_dst_linesize, 83 | (const uint8_t **)(frame->data), frame->linesize, 84 | pix_fmt, width, height); 85 | 86 | //write to rawvideo file 87 | fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file); 88 | 89 | }else if(pkt.stream_index == audio_stream_idx){ 90 | 91 | } 92 | 93 | //If we use the new API with reference counting, we own the data and need 94 | //to de-referene it when we don't use id anymore 95 | if(*got_frame && api_mode == API_MODE_NEW_API_NO_REF_COUNT) 96 | av_frame_unref(frame); 97 | 98 | return decoded; 99 | } 100 | 101 | static int open_codec_context(int *stream_idx, 102 | AVFormatContext *fmt_ctx, enum AVMediaType type) 103 | { 104 | int ret, stream_index; 105 | AVStream *st; 106 | AVCodecContext *dec_ctx = NULL; 107 | AVCodec *dec = NULL; 108 | AVDictionary *opts = NULL; 109 | 110 | ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0); 111 | if(ret < 0){ 112 | fprintf(stderr, "Could not find %s stream in input file '%s' \n", 113 | av_get_media_type_string(type), src_filename); 114 | return ret; 115 | }else{ 116 | stream_index = ret; 117 | st = fmt_ctx->streams[stream_index]; 118 | 119 | //find decoder for the stream 120 | dec_ctx = st->codec; 121 | dec = avcodec_find_decoder(dec_ctx->codec_id); 122 | if(!dec){ 123 | fprintf(stderr, "Failed to find %s codec\n", 124 | av_get_media_type_string(type)); 125 | return AVERROR(EINVAL); 126 | } 127 | 128 | //Init the decoders, with or without reference counting 129 | if(api_mode == API_MODE_NEW_API_REF_COUNT){ 130 | av_dict_set(&opts, "refcounted_frame", "1", 0); 131 | } 132 | 133 | if((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0){ 134 | fprintf(stderr, "Failed to open %s codec\n", 135 | av_get_media_type_string(type)); 136 | return ret; 137 | } 138 | *stream_idx = stream_index; 139 | } 140 | 141 | return 0; 142 | 143 | } 144 | 145 | static int get_format_from_sample_fmt(const char **fmt, 146 | enum AVSampleFormat sample_fmt) 147 | { 148 | int i; 149 | struct sample_fmt_entry{ 150 | enum AVSampleFormat sample_fmt; 151 | const char *fmt_be, *fmt_le; 152 | }sample_fmt_entries[] = { 153 | {AV_SAMPLE_FMT_U8, "u8", "u8"}, 154 | {AV_SAMPLE_FMT_S16, "s16be", "s16le"}, 155 | {AV_SAMPLE_FMT_S32, "s32be", "s32le"}, 156 | {AV_SAMPLE_FMT_FLT, "f32be", "f32le"}, 157 | {AV_SAMPLE_FMT_DBL, "f64be", "f64le"}, 158 | }; 159 | *fmt = NULL; 160 | 161 | for(i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++){ 162 | struct sample_fmt_entry *entry = &sample_fmt_entries[i]; 163 | if(sample_fmt == entry->sample_fmt){ 164 | *fmt = AV_NE(entry->fmt_be, entry->fmt_le); 165 | return 0; 166 | } 167 | } 168 | 169 | fprintf(stderr, 170 | "sample format %s is not supported as output format\n", 171 | av_get_sample_fmt_name(sample_fmt)); 172 | 173 | return -1; 174 | } 175 | 176 | int main(int argc, char **argv) 177 | { 178 | int ret = 0,got_frame; 179 | 180 | if(argc != 4 && argc != 5){ 181 | fprintf(stderr, "usage: %s [-refcount=] " 182 | "input_file video_output_file audio_output_file\n" 183 | "API example program to show how to read frames from an input file.\n" 184 | "This program reads frames from a file, decodes them, and writes decoded\n" 185 | "video frames to a rawvideo file named video_output_file, and decoded\n" 186 | "audio frames to a rawaudio file named audio_output_file.\n\n" 187 | "If the -refcount option is specified, the program use the\n" 188 | "reference counting frame system which allows keeping a copy of\n" 189 | "the data for longer than one decode call. If unset, it's using\n" 190 | "the classic old method.\n" 191 | "\n", argv[0]); 192 | exit(1); 193 | } 194 | 195 | if(argc == 5){ 196 | const char *mode = argv[1] + strlen("-refcount="); 197 | if(!strcmp(mode, "old")) api_mode = API_MODE_OLD; 198 | else if(!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT; 199 | else if(!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT; 200 | else{ 201 | fprintf(stderr, "unknow mode '%s'\n", mode); 202 | exit(1); 203 | } 204 | argv++; 205 | } 206 | 207 | src_filename = argv[1]; 208 | video_dst_filename = argv[2]; 209 | audio_dst_filename = argv[3]; 210 | 211 | // register all formats and codecs 212 | av_register_all(); 213 | 214 | //open input file, and allocate format context 215 | if(avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) 216 | { 217 | fprintf(stderr, "Could not open source file %s\n", src_filename); 218 | exit(1); 219 | } 220 | 221 | //retrieve stream information 222 | if(avformat_find_stream_info(fmt_ctx, NULL) < 0) 223 | { 224 | fprintf(stderr, "Could not find stream information\n"); 225 | exit(1); 226 | } 227 | 228 | if(open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) 229 | { 230 | video_stream = fmt_ctx->streams[video_stream_idx]; 231 | video_dec_ctx = video_stream->codec; 232 | 233 | video_dst_file = fopen(video_dst_filename, "wb"); 234 | if(!video_dst_file){ 235 | fprintf(stderr, "Could not open destination file %s\n", video_dst_filename); 236 | ret = 1; 237 | goto end; 238 | } 239 | 240 | //allocate image where the decoded image will be put 241 | width = video_dec_ctx->width; 242 | height = video_dec_ctx->height; 243 | pix_fmt = video_dec_ctx->pix_fmt; 244 | ret = av_image_alloc(video_dst_data, video_dst_linesize, 245 | width, height, pix_fmt, 1); 246 | 247 | if(ret < 0){ 248 | fprintf(stderr, "Could not allocate raw video buffer\n"); 249 | goto end; 250 | } 251 | video_dst_bufsize = ret; 252 | } 253 | 254 | if(open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) 255 | { 256 | audio_stream = fmt_ctx->streams[audio_stream_idx]; 257 | audio_dec_ctx = audio_stream->codec; 258 | audio_dst_file = fopen(audio_dst_filename, "wb"); 259 | if(!audio_dst_file){ 260 | fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename); 261 | ret = 1; 262 | goto end; 263 | } 264 | } 265 | 266 | //dump input information to stderr 267 | av_dump_format(fmt_ctx, 0, src_filename, 0); 268 | 269 | if(!audio_stream && !video_stream){ 270 | fprintf(stderr, "Could not find audio or video stream in the input, aborting\n"); 271 | ret = 1; 272 | goto end; 273 | } 274 | 275 | //When using the new API, you need to use the libavutil/frame.h API while 276 | //the classic frame management is available in libavcodec 277 | if(api_mode == API_MODE_OLD) 278 | frame = avcodec_alloc_frame(); 279 | else 280 | frame = av_frame_alloc(); 281 | if(!frame){ 282 | fprintf(stderr, "Could not allocate frame\n"); 283 | ret = AVERROR(ENOMEM); 284 | goto end; 285 | } 286 | 287 | //initialize packet, set data to NULL, let the demuxer fill it 288 | av_init_packet(&pkt); 289 | pkt.data = NULL; 290 | pkt.size = 0; 291 | 292 | if(video_stream) 293 | printf("Demuxing video from file '%s' into '%s' \n", src_filename, video_dst_filename); 294 | if(audio_stream) 295 | printf("Demuxing video from file '%s' into '%s' \n", src_filename, audio_dst_filename); 296 | 297 | //read frames from the file 298 | while(av_read_frame(fmt_ctx, &pkt) >= 0){ 299 | AVPacket orig_pkt = pkt; 300 | do{ 301 | ret = decode_packet(&got_frame, 0); 302 | if(ret < 0) 303 | break; 304 | pkt.data += ret; 305 | pkt.size -= ret; 306 | }while(pkt.size > 0); 307 | av_free_packet(&orig_pkt); 308 | } 309 | 310 | //flush cached frames 311 | pkt.data = NULL; 312 | pkt.size = 0; 313 | do{ 314 | decode_packet(&got_frame, 1); 315 | }while(got_frame); 316 | 317 | printf("Demuxing succeeded.\n"); 318 | 319 | if(video_stream){ 320 | printf("Play the output video file with the command:\n" 321 | "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", 322 | av_get_pix_fmt_name(pix_fmt), width, height, 323 | video_dst_filename); 324 | } 325 | 326 | if(audio_stream){ 327 | enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt; 328 | int n_channels = audio_dec_ctx->channels; 329 | const char *fmt; 330 | 331 | if(av_sample_fmt_is_planar(sfmt)){ 332 | const char *packed = av_get_sample_fmt_name(sfmt); 333 | printf("Warning:the sample format the decoder produced is planar " 334 | "(%s). This example will output the first channel only.\n", 335 | packed?packed:"?"); 336 | sfmt = av_get_packed_sample_fmt(sfmt); 337 | n_channels = 1; 338 | } 339 | 340 | if((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0) 341 | goto end; 342 | 343 | printf("Play the output audio file with the command:\n" 344 | "ffplay -f %s -ac %d -ar %d %s\n", 345 | fmt, n_channels, audio_dec_ctx->sample_rate, 346 | audio_dst_filename); 347 | } 348 | 349 | end: 350 | avcodec_close(video_dec_ctx); 351 | avcodec_close(audio_dec_ctx); 352 | avformat_close_input(&fmt_ctx); 353 | if(video_dst_file) 354 | fclose(video_dst_file); 355 | if(audio_dst_file) 356 | fclose(audio_dst_file); 357 | if(api_mode == API_MODE_OLD) 358 | avcodec_free_frame(&frame); 359 | else 360 | av_frame_free(&frame); 361 | av_free(video_dst_data[0]); 362 | 363 | return ret < 0; 364 | } 365 | 366 | -------------------------------------------------------------------------------- /decoding_encoding.c: -------------------------------------------------------------------------------- 1 | /* 2 | * @file 3 | * libavcodec API use example 4 | * 5 | * @example decoding_encoding.c 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #define INBUF_SIZE 4096 18 | #define AUDIO_INBUF_SIZE 20480 19 | #define AUDIO_REFILL_THRESH 4096 20 | 21 | /* 22 | *check that a given sample format is supported by the encoder 23 | */ 24 | static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt) 25 | { 26 | const enum AVSampleFormat *p = codec->sample_fmts; 27 | 28 | while(*p != AV_SAMPLE_FMT_NONE){ 29 | if(*p == sample_fmt) 30 | return 1; 31 | p++; 32 | } 33 | 34 | return 0; 35 | } 36 | 37 | /* just pick the highest supported samplerate*/ 38 | static int select_sample_rate(AVCodec *codec) 39 | { 40 | const int *p; 41 | int best_samplerate = 0; 42 | 43 | if(!codec->supported_samplerates) 44 | return 44100; 45 | 46 | p = codec->supported_samplerates; 47 | while(*p){ 48 | best_samplerate = FFMAX(*p, best_samplerate); 49 | p++; 50 | } 51 | 52 | return best_samplerate; 53 | } 54 | 55 | /*select layout with the highest channel count*/ 56 | static int select_channel_layout(AVCodec *codec) 57 | { 58 | const uint64_t *p; 59 | uint64_t best_ch_layout = 0; 60 | int best_nb_channels = 0; 61 | 62 | if(!codec->channel_layouts) 63 | return AV_CH_LAYOUT_STEREO; 64 | 65 | p = codec->channel_layouts; 66 | while(*p){ 67 | int nb_channels = av_get_channel_layout_nb_channels(*p); 68 | 69 | if(nb_channels > best_nb_channels){ 70 | best_ch_layout = *p; 71 | best_nb_channels=nb_channels; 72 | } 73 | p++; 74 | } 75 | return best_ch_layout; 76 | } 77 | 78 | /* 79 | * Audio encoding example 80 | */ 81 | static void audio_encode_example(const char *filename) 82 | { 83 | AVCodec *codec; 84 | AVCodecContext *c = NULL; 85 | AVFrame *frame; 86 | AVPacket pkt; 87 | int i, j, k, ret, got_output; 88 | int buffer_size; 89 | FILE *f; 90 | uint16_t *samples; 91 | float t, tincr; 92 | 93 | printf("Encode audio file %s\n", filename); 94 | 95 | //find the MP2 encoder 96 | codec = avcodec_find_encoder(AV_CODEC_ID_MP2); 97 | if(!codec){ 98 | fprintf(stderr, "Codec not found\n"); 99 | exit(1); 100 | } 101 | 102 | c = avcodec_alloc_context3(codec); 103 | if(!c){ 104 | fprintf(stderr, "Could not allocate audio codec context\n"); 105 | exit(1); 106 | } 107 | 108 | //put sample parameters 109 | c->bit_rate = 64000; 110 | //check that the encoder supports s16 pcm input 111 | c->sample_fmt = AV_SAMPLE_FMT_S16; 112 | if(!check_sample_fmt(codec, c->sample_fmt)){ 113 | fprintf(stderr, "Encoder does not support sample formt %s", 114 | av_get_sample_fmt_name(c->sample_fmt)); 115 | exit(1); 116 | } 117 | 118 | //select other audio parameters supported by the encoder 119 | c->sample_rate = select_sample_rate(codec); 120 | c->chanel_layout = select_channel_layout(codec); 121 | c->channels = av_get_channel_layout_nb_channels(c->channel_layout); 122 | 123 | //open it 124 | if(avcodec_open2(c, codec, NULL) < 0){ 125 | fprintf(stderr, "Could not open codec\n"); 126 | exit(1); 127 | } 128 | 129 | f = fopen(filename, "wb"); 130 | if(!f){ 131 | fprintf(stderr, "Could not open %s\n", filename); 132 | exit(1); 133 | } 134 | 135 | //frame containing input raw audio 136 | frame = av_frame_alloc(); 137 | if(!frame){ 138 | fprintf(stderr, "Could not allocate audio frame\n"); 139 | exit(1); 140 | } 141 | 142 | frame->nb_samples = c->frame_size; 143 | frame->format = c->sample_fmt; 144 | frame->channel_layout = c->channel_layout; 145 | 146 | //the codec gives us the frame size, in samples 147 | //we calculate the size of the samples buffer in bytes 148 | buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size, c->sample_fmt, 0); 149 | 150 | if(buffer_size < 0){ 151 | fprintf(stderr, "could not get sample buffer size\n"); 152 | exit(1); 153 | } 154 | 155 | samples = av_malloc(buffer_size); 156 | if(!samples){ 157 | fprintf(stderr, "Could not allocate %d bytes for samples buffer\n", 158 | buffer_size); 159 | exit(1); 160 | } 161 | //setup the data pointers in the AVFrame 162 | ret = avcodec_fill_audio_frame(frame, c-.channels, c->sample_fmt, 163 | (const uint8_t *)samples, buffer_size, 0); 164 | if(ret < 0){ 165 | fprintf(stderr, "Could not setup audio frame\n"); 166 | exit(1); 167 | } 168 | 169 | //encode a single tone sound 170 | t = 0; 171 | tincr = 2 * M_PI * 440.0 / c->sample_rate; 172 | for(i = 0; i < 200; i++){ 173 | av_init_packet(&pkt); 174 | pkt.data = NULL;//packet data will be allocated by the encoder 175 | pkt.size = 0; 176 | 177 | for(j = 0; j < c->frame_size; j++){ 178 | samples[2*j] = (int)(sin(t)*10000); 179 | 180 | for(k = 1; k < c->channels; k++) 181 | samples[2*j + k] = samples[2*j]; 182 | t += tincr; 183 | } 184 | 185 | //encode the samples 186 | ret = avcodec_encode_audio2(c, &pkt, frame, &got_output); 187 | if(ret < 0){ 188 | fprintf(stderr, "Error encoding audio frame\n"); 189 | exit(1); 190 | } 191 | if(got_output){ 192 | fwrite(pkt.data, 1, pkt.size, f); 193 | av_free_packet(&pkt); 194 | } 195 | } 196 | 197 | //get the delayed frames 198 | for(got_output = 1; got_output; i++){ 199 | ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output); 200 | if(ret < 0){ 201 | fprintf(stderr, "Error encoding frame\n"); 202 | exit(1); 203 | } 204 | 205 | if(got_output){ 206 | fwrite(pkt.data, 1, pkt.size, f); 207 | av_free_packet(&pkt); 208 | } 209 | } 210 | flcose(f); 211 | 212 | av_freep(&samples); 213 | av_frame_free(&frame); 214 | avcodec_close(c); 215 | av_free(c); 216 | } 217 | 218 | /* 219 | * Audio decoding 220 | */ 221 | static void audio_decode_example(const char *outfilename, const char *filename) 222 | { 223 | AVCodec *codec; 224 | AVCodecContext *c = NULL; 225 | int len; 226 | FILE *f, *outfile; 227 | uin8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; 228 | AVPacket avpkt; 229 | AVFrame *decoded_frame = NULL; 230 | 231 | av_init_packet(&avpkt); 232 | 233 | printf("Decode audio file %s to %s\n", filename, outfilename); 234 | 235 | //find the mpeg audio decoder 236 | codec = avcodec_find_decoder(AV_CODEC_ID_MP2); 237 | if(!codec){ 238 | fprintf(stderr, "Codec not found\n"); 239 | exit(1); 240 | } 241 | 242 | c = avcodec_alloc_context3(codec); 243 | if(!c){ 244 | fprintf(stderr, "Could not allocate video codec context\n"); 245 | exit(1); 246 | } 247 | 248 | //open it 249 | if(avcodec_open2(c, codec, NULL) < 0){ 250 | fprintf(stderr, "Could not open codec\n"); 251 | exit(1); 252 | } 253 | 254 | f = fopen(filename, "rb"); 255 | if(!f){ 256 | fprintf(stderr, "Could not open %s\n", filename); 257 | exit(1); 258 | } 259 | 260 | outfile = fopen(outfilename, "wb"); 261 | if(!outfile){ 262 | av_free(c); 263 | exit(1); 264 | } 265 | 266 | //decode until eof 267 | avpkt.data = inbuf; 268 | avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f); 269 | 270 | while(avpkt.size > 0){ 271 | int i, ch; 272 | int got_frame = 0; 273 | 274 | if(!decoded_frame){ 275 | if(!(decoded_frame = av_frame_alloc())){ 276 | fprintf(stderr, "Could not allocate audio frame\n"); 277 | exit(1); 278 | } 279 | } 280 | 281 | len = avcodec_decode_audio4(c, decode_frame, &got_frame, &avpkt); 282 | if(len < 0){ 283 | fprintf(stderr, "Error while decoding\n"); 284 | exit(1); 285 | } 286 | 287 | if(got_frame){ 288 | //if a frame has been decoded, output it 289 | int data_size = av_get_bytes_per_sample(c->sample_fmt); 290 | if(data_size < 0){ 291 | //this should not occur, checking just for paranoia 292 | fprintf(stderr, "Failed to calculate data size\n"); 293 | exit(1); 294 | } 295 | 296 | for(i = 0; i < decode_frame->nb_channels; i++) 297 | for(ch = 0; ch < c->channels; ch++) 298 | fwrite(decode_frame->data[ch] + data_size*i, 1, data_size, outfile); 299 | } 300 | 301 | avpkt.size -= len; 302 | avpkt.data += len; 303 | avpkt.dts = avpkt.pts = AV_NOPTS_VALUE; 304 | if(avpkt.size < AUDIO_REFILL_THRESH){ 305 | /* 306 | * Refill the input buffer, to avido trying to decode 307 | * incomplete frames.Instead of this, one could also use 308 | * a parser, or use a proper container format through libavformat 309 | */ 310 | memmove(inbuf, avpkt.data, avpkt.size); 311 | avpkt.data = inbuf; 312 | len = fread(avpkt.data + avpkt.size, 1, 313 | AUDIO_INBUF_SIZE - avpkt.size, f); 314 | if(len > 0) 315 | avpkt.size += len; 316 | } 317 | } 318 | 319 | fclose(outfile); 320 | fclose(f); 321 | 322 | avcodec_close(c); 323 | av_free(c); 324 | av_frame_free(&decoded_frame); 325 | } 326 | 327 | /* 328 | * Video encoding example 329 | */ 330 | static void video_encode_example(const char *filename, int codec_id) 331 | { 332 | AVCodec *codec; 333 | AVCodecContext *c = NULL; 334 | int i, ret, x, y, got_output; 335 | FILE *f; 336 | AVFrame *frame; 337 | AVPacket pkt; 338 | uint8_t endcode[] = {0, 0, 1, 0xb7}; 339 | 340 | printf("Encode video file %s\n", filename); 341 | 342 | //find the mepg1 video encoder 343 | codec = avcodec_find_encoder(codec_id); 344 | if(!codec){ 345 | fprintf(stderr, "Codec not found\n"); 346 | exit(1); 347 | } 348 | 349 | c = avcodec_alloc_context3(codec); 350 | if(!c){ 351 | fprintf(stderr, "Could not allocate video codec context\n"); 352 | exit(1); 353 | } 354 | 355 | //put sample parameters 356 | c->bit_rate = 400000; 357 | //resolution must be a multiple of two 358 | c->width = 352; 359 | c->height = 288; 360 | //frames per second 361 | c->time_base = (AVRational){1, 25}; 362 | 363 | //emit one intra frmae every ten frames 364 | //check frame pic_type before passing frame 365 | //to encoder, if frame->pict_type is AV_PICTURE_TYPE_I 366 | //then top_size is ignored and the output of encoder 367 | //will always be I frame irrespective to gop_size 368 | c->gop_size = 10; 369 | c->max_b_frames = 1; 370 | c->pix_fmt = AV_PIX_FMT_YUV420P; 371 | 372 | if(codec_id == AV_CODEC_ID_H264) 373 | av_opt_set(c->priv_data, "preset", "slow", 0); 374 | 375 | //open it 376 | if(avcodec_open2(c, codec, NULL) < 0){ 377 | fprintf(stderr, "Could not open codec\n"); 378 | exit(1); 379 | } 380 | 381 | f = fopen(filename, "wb"); 382 | if(!f){ 383 | fprintf(stderr, "Could not open %s\n", filename); 384 | exit(1); 385 | } 386 | 387 | frame = av_frame_alloc(); 388 | if(!frame){ 389 | fprintf(stderr, "Could not allocate video frame\n"); 390 | exit(1); 391 | } 392 | 393 | frame->format = c->pix_fmt; 394 | frame->width = c->width; 395 | frame->height = c->height; 396 | 397 | //the image can be allocated by any means and av_image_alloc() is 398 | //just the most convenient way if av_malloc() is to be used 399 | ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, 400 | c->pix_fmt, 32); 401 | if(ret < 0){ 402 | fprintf(stderr, "Could not allocate raw picture buffer\n"); 403 | exit(1); 404 | } 405 | 406 | //encode 1 second of video 407 | for(i = 0; i < 25; i++){ 408 | av_init_packet(&pkt); 409 | pkt.data = NULL; //package data will be allocated by the encoder 410 | pkt.size = 0; 411 | 412 | fflush(stdout); 413 | //prepare a dummy image 414 | //Y 415 | for(y = 0; y < c->height; y++){ 416 | for(x = 0; x < c->width; x++){ 417 | frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3; 418 | } 419 | } 420 | 421 | //Cb and Cr 422 | for(y = 0; y < c->height/2; y++){ 423 | for(x = 0; x < c->widht/2; x++){ 424 | frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2; 425 | frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5; 426 | } 427 | } 428 | 429 | frame-pts = i; 430 | 431 | //encode the image 432 | ret = avcodec_encode_video2(c, &pkt, frame, &got_output); 433 | if(ret < 0){ 434 | fprintf(stderr, "Error encoding frame\n"); 435 | exit(1); 436 | } 437 | 438 | if(got_output){ 439 | printf("Write frame %3d (size = %5d)\n", i, pkt.size); 440 | fwrite(pkt.data, 1, pkt.size, f); 441 | av_free_packet(&pkt); 442 | } 443 | } 444 | 445 | //get the delayed frames 446 | for(got_output = 1; got_output; i++){ 447 | fflush(stdout); 448 | 449 | ret = avcodec_encode_video2(c, &pkt, NULL, &got_output); 450 | if(ret < 0){ 451 | fprintf(stderr, "Error encoding frame\n"); 452 | exit(1); 453 | } 454 | 455 | if(got_output){ 456 | printf("Write frame %3d (size = %5d)\n",i, pkt.size); 457 | fwrite(pkt.data, 1, pkt.size, f); 458 | av_free_packet(&pkt); 459 | } 460 | } 461 | 462 | //add sequence end code to have a real mpeg file 463 | fwrite(endcode, 1, sizeof(endcode), f); 464 | fclose(f); 465 | 466 | 467 | avcodec_close(c); 468 | av_free(c); 469 | av_freep(&frame->data[0]); 470 | av_frame_free(&frame); 471 | printf("\n"); 472 | } 473 | 474 | /* 475 | * video decoding example 476 | */ 477 | static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, 478 | char *filename) 479 | { 480 | FILE *f; 481 | int i; 482 | 483 | f = fopen(filename, "w"); 484 | fprintf(f, "ps\n%d %d\n%d\n", xsize, ysize, 255); 485 | for(i = 0; i < ysize; i++) 486 | fwrite(buf + i *wrap, 1, xsize, f); 487 | flcose(f); 488 | } 489 | 490 | static int decode_write_frame(const char *outfilename, AVCodecContext *avctx, 491 | AVFrame *frame, int *frame_count, AVPacket *pkt, int last) 492 | { 493 | int len, got_frame; 494 | char buf[1024]; 495 | 496 | len = avcodec_decode_video2(avctx, frame, &got_frame, pkt); 497 | if(len < 0){ 498 | fprintf(stderr, "Error while decoding frame %d\n", *frame_count); 499 | return len; 500 | } 501 | if(got_frame){ 502 | printf("Saving %s frame %3d\n", last?"last":"", *frame_count); 503 | fflush(stdout); 504 | 505 | //the picture is allocated by the decoder, no need to free it 506 | snprintf(buf, sizeof(buf), outfilename, *frame_count); 507 | pgm_save(frame->data[0], frame->linesize[0], 508 | frame->width, frame->height, buf); 509 | (*frame_count)++; 510 | } 511 | 512 | if(pkt->data){ 513 | pkt->size -= len; 514 | pkt->data += len; 515 | } 516 | 517 | return 0; 518 | } 519 | 520 | static void video_decode_example(const char *outfilename, const char *filename) 521 | { 522 | AVCodec *codec; 523 | AVCodecContext *c = NULL; 524 | int frame_count; 525 | FILE *f; 526 | AVFrame *frame; 527 | uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; 528 | AVPacket avpkt; 529 | 530 | av_init_packet(&avpkt); 531 | 532 | //set end of buffer to 0 533 | //(this ensures that no overreading happens for damaged mpeg streams) 534 | memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE); 535 | 536 | printf("Decode video fil %s to %s\n", filename, outfilename); 537 | 538 | //find the mpeg1 video decoder 539 | codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO); 540 | if(!codec){ 541 | fprintf(stderr, "Codec not found\n"); 542 | exit(1); 543 | } 544 | 545 | c = avcodec_alloc_context3(codec); 546 | if(!c){ 547 | fprintf(stderr, "Could not allocate video codec context\n"); 548 | exit(1); 549 | } 550 | 551 | if(codec->capabilities & AV_CODEC_CAP_TRUNCATED) 552 | c->flags |= AV_CODEC_FLAG_TRUNCATED; //we do not send complete frames 553 | 554 | //For some codecs, such as msmpeg4 and mpeg4, width and height 555 | //Must be initialized there because this information is not avalable in the bitstream 556 | 557 | //open it 558 | if(avcodec_open2(c, codec, NULL) < 0){ 559 | fprintf(stderr, "Could not open codec\n"); 560 | exit(1); 561 | } 562 | 563 | f = fopen(filename, "rb"); 564 | if(!f){ 565 | fprintf(stderr, "Could not open %s\n", filename); 566 | exit(1); 567 | } 568 | 569 | frame = av_frame_alloc(); 570 | if(!frame){ 571 | fprintf(stderr, "Could not allocate video frame\n"); 572 | exit(1); 573 | } 574 | 575 | frame_count = 0; 576 | for(;;){ 577 | avpkt.size = fread(inbuf, 1, INBUF_SIZE, f); 578 | if(avpkt.size == 0) 579 | break; 580 | 581 | /* 582 | * NOTE1:some codecs are stream based(mpegvideo, mepgaudio) 583 | * and this is the only method to use them because you cannot 584 | * konw the compressed data size before analysing it. 585 | * 586 | * BUT some other codecs(mspeg4, mpeg4) are inherently frame 587 | * based, so you must call them with all the data for one frame 588 | * exactly. You must also initialize 'width' and 'height' 589 | * before initializeing them 590 | */ 591 | 592 | /* 593 | * NOTE2: some codecs allow the raw parameters(frame size, sample rate) 594 | * to be changed at any frame, We handle this, so you should also take care of it. 595 | */ 596 | 597 | //here, we use a stream based decoder(mpeg1video), so we feed decoder and see if it could decode a frame 598 | avpkt.data = inbuf; 599 | while(avpkt.size > 0) 600 | if(decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0) 601 | exit(1); 602 | } 603 | 604 | //some codecs, such as MPEG, transmit the I and P frame with a 605 | //latency of one frame. You must do the following to have a chance to get the last frame of the video 606 | avpkt.data = NULL; 607 | avpkt.size = 0; 608 | decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1); 609 | 610 | fclose(f); 611 | 612 | avcodec_close(c); 613 | av_free(c); 614 | av_frame_free(&frame); 615 | printf("\n"); 616 | } 617 | 618 | 619 | int main(int argc, char **argv) 620 | { 621 | const char *output_type; 622 | 623 | //register all the codecs 624 | avcodec_register_all(); 625 | 626 | if(argc < 2){ 627 | printf("usage:%s output_type\n" 628 | "API example program to decode/encode a media stream with libavcodec.\n" 629 | "This is program generates a synthetic stream and encodes it to a file\n" 630 | "named test.h264, test.mp2 or test.mpg depending on output type\n" 631 | "The encoded stream is then decoded and written to a raw data output\n" 632 | "output type must be chosen between 'h264', 'mp2', 'mpg'\n", 633 | argv[0]); 634 | return 1; 635 | } 636 | 637 | output_type = argv[1]; 638 | 639 | if(!strcmp(output_type, "h264")){ 640 | video_encode_example("test.h264", AV_CODEC_ID_H264); 641 | }else if(!strcmp(output_type, "mp2")){ 642 | audio_encode_example("test.mp2"); 643 | audio_decode_example("test.pcm", "test.mp2"); 644 | }else if(!strcmp(output_type, "mpg")){ 645 | video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO); 646 | video_decode_example("test%02d.pgm", "test.mpg"); 647 | }else{ 648 | fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n", 649 | output_type); 650 | return 1; 651 | } 652 | 653 | return 0; 654 | } 655 | 656 | -------------------------------------------------------------------------------- /muxer.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #define STREAM_DURATION 10.0 18 | #define STREAM_FRAME_RATE 25 19 | #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P 20 | 21 | #define SCALE_FLAGS SWS_BICUBIC 22 | 23 | // a wrapper around a single output AVStream 24 | typedef struct OutputStream{ 25 | AVStream *st; 26 | 27 | //pts of the next frame that will be generated 28 | int64_t next_pts; 29 | int samples_count; 30 | 31 | AVFrame *frame; 32 | AVFrame *tmp_frame; 33 | 34 | float t, tincr, tincr2; 35 | 36 | struct SwsContext *sws_ctx; 37 | struct SwsContext *swr_ctx; 38 | }OutputStream; 39 | 40 | static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt) 41 | { 42 | AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base; 43 | 44 | printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n", 45 | av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base), 46 | av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base), 47 | av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base), 48 | pkt->stream_index); 49 | } 50 | 51 | static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt) 52 | { 53 | /* rescale output packet timestamp values from codec to stream timebase */ 54 | av_packet_rescale_ts(pkt, *time_base, st->time_base); 55 | pkt->stream_index = st->index; 56 | 57 | /* Write the compressed frame to the media file. */ 58 | log_packet(fmt_ctx, pkt); 59 | return av_interleaved_write_frame(fmt_ctx, pkt); 60 | } 61 | 62 | // Add an output stream 63 | static void add_stream(OutputStream *ost, AVFormatContext *oc, 64 | AVCodec **codec, enum AVCodecID codec_id) 65 | { 66 | AVCodecContext *c; 67 | int i; 68 | 69 | //find the encoder 70 | *codec = avcodec_find_encoder(codec_id); 71 | if(!(*codec)){ 72 | fprintf(stderr, "Could not find encoder for '%s'\n", 73 | avcodec_get_name(codec_id)); 74 | exit(1); 75 | } 76 | ost->st = avformat_new_stream(oc, *codec); 77 | if(!ost->st){ 78 | fprintf(stderr, "Could not allocate stream\n"); 79 | exit(1); 80 | } 81 | ost->st->id = oc->nb_streams - 1; 82 | c = ost->st->codec; 83 | 84 | switch((*codec)->type){ 85 | case AVMEDIA_TYPE_AUDIO: 86 | c->sample_fmt = (*codec)->sample_fmts? 87 | (*codec)->sample_fmts[0]:AV_SAMPLE_FMT_FLTP; 88 | c->bit_rate = 64000; 89 | c->sample_rate = 44100; 90 | if((*codec)->supported_samplerates){ 91 | c->sample_rate = (*codec)->supported_samplerates[0]; 92 | for(i = 0; (*codec)->supported_samplerates[i]; i++){ 93 | if((*codec)->supported_samplerates[i] == 44100) 94 | c->sample_rate = 44100; 95 | } 96 | } 97 | c->channels = av_get_channel_layout_nb_channels(c->channel_layout); 98 | c->channel_layout = AV_CH_LAYOUT_STEREO; 99 | if((*codec)->channel_layouts){ 100 | c->channel_layout = (*codec)->channel_layouts[0]; 101 | for(i = 0; (*codec)->channel_layouts[i]; i++){ 102 | if((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO) 103 | c->channel_layout = AV_CH_LAYOUT_STEREO; 104 | } 105 | } 106 | c->channels = av_get_channel_layout_nb_channels(c->channel_layout); 107 | ost->st->time_base = (AVRational){1, c->sample_rate}; 108 | break; 109 | case AVMEDIA_TYPE_VIDEO: 110 | c->codec_id = codec_id; 111 | c->bit_rate = 400000; 112 | //Resolution must be a multiple of two 113 | c->width = 352; 114 | c->height = 288; 115 | //timebase:This is the fundamental unit of time(in seconds) in terms 116 | //of which frame timestamps are represented. For fixed-fps content, 117 | //timebase should be 1/framerate and timestamp increments should be identical to 1 118 | ost->st->time_base = (AVRational){1, STREAM_FRAME_RATE}; 119 | c->time_base = ost->st->time_base; 120 | c->gop_size = 12; 121 | c->pix_fmt = STREAM_PIX_FMT; 122 | if(c->codec_id == AV_CODEC_ID_MPEG2VIDEO){ 123 | //just for testing, we also add B frames 124 | c->max_b_frames = 2; 125 | } 126 | if(c->codec_id == AV_CODEC_ID_MPEG1VIDEO){ 127 | //Needed to avoid using macroblocks in which some coeffs overflow. 128 | //This does not happen with normal video, it just happends here as 129 | //the motion of the chroma plane does not match the luma plane 130 | c->mb_decision = 2; 131 | } 132 | 133 | default: 134 | break; 135 | } 136 | //some formats want stream headers to be separate 137 | if(oc->oformat->flags & AVFMT_GLOBALHEADER) 138 | c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; 139 | } 140 | 141 | /* audio output */ 142 | 143 | static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt, 144 | uint64_t channel_layout, 145 | int sample_rate, int nb_samples) 146 | { 147 | AVFrame *frame = av_frame_alloc(); 148 | int ret; 149 | 150 | if (!frame) { 151 | fprintf(stderr, "Error allocating an audio frame\n"); 152 | exit(1); 153 | } 154 | 155 | frame->format = sample_fmt; 156 | frame->channel_layout = channel_layout; 157 | frame->sample_rate = sample_rate; 158 | frame->nb_samples = nb_samples; 159 | 160 | if (nb_samples) { 161 | ret = av_frame_get_buffer(frame, 0); 162 | if (ret < 0) { 163 | fprintf(stderr, "Error allocating an audio buffer\n"); 164 | exit(1); 165 | } 166 | } 167 | 168 | return frame; 169 | } 170 | 171 | 172 | static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) 173 | { 174 | AVCodecContext *c; 175 | int nb_samples; 176 | int ret; 177 | AVDictionary *opt = NULL; 178 | 179 | c = ost->st->codec; 180 | 181 | //open it 182 | av_dict_copy(&opt, opt_arg, 0); 183 | ret = avcodec_open2(c, codec, &opt_arg); 184 | av_dict_free(&opt); 185 | if(ret < 0){ 186 | fprintf(stderr, "Could not open audio codec:%s\n", av_err2str(ret)); 187 | exit(1); 188 | } 189 | 190 | //init signal generator 191 | ost->t = 0; 192 | ost->tincr = 2 * M_PI * 110.0 / c->sample_rate; 193 | //increment frequency by 110 Hz per second 194 | ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; 195 | 196 | if(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) 197 | nb_samples = 10000; 198 | else 199 | nb_samples = c->frame_size; 200 | 201 | ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, 202 | c->sample_rate, nb_samples); 203 | ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout, 204 | c->sample_rate, nb_samples); 205 | /* create resampler context */ 206 | ost->swr_ctx = swr_alloc(); 207 | if (!ost->swr_ctx) { 208 | fprintf(stderr, "Could not allocate resampler context\n"); 209 | exit(1); 210 | } 211 | 212 | /* set options */ 213 | av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0); 214 | av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0); 215 | av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); 216 | av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0); 217 | av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0); 218 | av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0); 219 | 220 | /* initialize the resampling context */ 221 | if ((ret = swr_init(ost->swr_ctx)) < 0) { 222 | fprintf(stderr, "Failed to initialize the resampling context\n"); 223 | exit(1); 224 | } 225 | } 226 | 227 | /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and 228 | * * 'nb_channels' channels. */ 229 | static AVFrame *get_audio_frame(OutputStream *ost) 230 | { 231 | AVFrame *frame = ost->tmp_frame; 232 | int j, i, v; 233 | int16_t *q = (int16_t*)frame->data[0]; 234 | 235 | /* check if we want to generate more frames */ 236 | if (av_compare_ts(ost->next_pts, ost->st->codec->time_base, 237 | STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) 238 | return NULL; 239 | 240 | for (j = 0; j nb_samples; j++) { 241 | v = (int)(sin(ost->t) * 10000); 242 | for (i = 0; i < ost->st->codec->channels; i++) 243 | *q++ = v; 244 | ost->t += ost->tincr; 245 | ost->tincr += ost->tincr2; 246 | } 247 | 248 | frame->pts = ost->next_pts; 249 | ost->next_pts += frame->nb_samples; 250 | 251 | return frame; 252 | } 253 | 254 | /* 255 | * * encode one audio frame and send it to the muxer 256 | * * return 1 when encoding is finished, 0 otherwise 257 | * */ 258 | static int write_audio_frame(AVFormatContext *oc, OutputStream *ost) 259 | { 260 | AVCodecContext *c; 261 | AVPacket pkt = { 0 }; // data and size must be 0; 262 | AVFrame *frame; 263 | int ret; 264 | int got_packet; 265 | int dst_nb_samples; 266 | 267 | av_init_packet(&pkt); 268 | c = ost->st->codec; 269 | 270 | frame = get_audio_frame(ost); 271 | if (frame) { 272 | /* convert samples from native format to destination codec format, using the resampler */ 273 | /* compute destination number of samples */ 274 | dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples, 275 | c->sample_rate, c->sample_rate, AV_ROUND_UP); 276 | av_assert0(dst_nb_samples == frame->nb_samples); 277 | 278 | /* when we pass a frame to the encoder, it may keep a reference to it 279 | * internally; 280 | * make sure we do not overwrite it here 281 | */ 282 | ret = av_frame_make_writable(ost->frame); 283 | if (ret < 0) 284 | exit(1); 285 | 286 | /* convert to destination format */ 287 | ret = swr_convert(ost->swr_ctx, 288 | ost->frame->data, dst_nb_samples, 289 | (const uint8_t **)frame->data, frame->nb_samples); 290 | if (ret < 0) { 291 | fprintf(stderr, "Error while converting\n"); 292 | exit(1); 293 | } 294 | frame = ost->frame; 295 | 296 | frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base); 297 | ost->samples_count += dst_nb_samples; 298 | } 299 | 300 | ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); 301 | if (ret < 0) { 302 | fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret)); 303 | exit(1); 304 | } 305 | 306 | if (got_packet) { 307 | ret = write_frame(oc, &c->time_base, ost->st, &pkt); 308 | if (ret < 0) { 309 | fprintf(stderr, "Error while writing audio frame: %s\n", 310 | av_err2str(ret)); 311 | exit(1); 312 | } 313 | } 314 | 315 | return (frame || got_packet) ? 0 : 1; 316 | } 317 | 318 | //video output 319 | static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) 320 | { 321 | AVFrame *picture; 322 | int ret; 323 | 324 | picture = av_frame_alloc(); 325 | if(!picture){ 326 | fprintf(stderr, "Could not alloc AVFrame\n"); 327 | return NULL; 328 | } 329 | picture->format = pix_fmt; 330 | picture->width = width; 331 | picture->height = height; 332 | 333 | //allocate the buffers for the frame data 334 | ret = av_frame_get_buffer(picture, 32); 335 | if(ret < 0){ 336 | fprintf(stderr, "Could not allocate frame data\n"); 337 | exit(1); 338 | } 339 | 340 | return picture; 341 | } 342 | 343 | static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg) 344 | { 345 | int ret; 346 | AVCodecContext *c = ost->st->codec; 347 | AVDictionary *opt = NULL; 348 | 349 | av_dict_copy(&opt, opt_arg, 0); 350 | 351 | //open the codec 352 | ret = avcodec_open2(c, codec, &opt); 353 | av_dict_free(&opt); 354 | if(ret < 0){ 355 | fprintf(stderr, "Could not open video codec:%s\n", av_err2str(ret)); 356 | exit(1); 357 | } 358 | 359 | //allocate and init a re-usable frame 360 | ost->frame = alloc_picture(c->pix_fmt, c->width, c->height); 361 | if(!ost->frame){ 362 | fprintf(stderr, "Could not allocate video frame\n"); 363 | exit(1); 364 | } 365 | 366 | //If the output format is not YUV420P,then a temporary YUV420P 367 | //picture is needed too.It is then converted to the required output format 368 | ost->tmp_frame = NULL; 369 | if(c->pix_fmt != AV_PIX_FMT_YUV420P){ 370 | ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height); 371 | if(!ost->tmp_frame){ 372 | fprintf(stderr, "Could not allocate temporary picture\n"); 373 | exit(1); 374 | } 375 | } 376 | } 377 | 378 | /* Prepare a dummy image. */ 379 | static void fill_yuv_image(AVFrame *pict, int frame_index, 380 | int width, int height) 381 | { 382 | int x, y, i, ret; 383 | 384 | /* when we pass a frame to the encoder, it may keep a reference to it 385 | * * internally; 386 | * * make sure we do not overwrite it here 387 | * */ 388 | ret = av_frame_make_writable(pict); 389 | if (ret < 0) 390 | exit(1); 391 | 392 | i = frame_index; 393 | 394 | /* Y */ 395 | for (y = 0; y < height; y++) 396 | for (x = 0; x < width; x++) 397 | pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; 398 | 399 | /* Cb and Cr */ 400 | for (y = 0; y < height / 2; y++) { 401 | for (x = 0; x < width / 2; x++) { 402 | pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; 403 | pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; 404 | } 405 | } 406 | } 407 | 408 | static AVFrame *get_video_frame(OutputStream *ost) 409 | { 410 | AVCodecContext *c = ost->st->codec; 411 | 412 | /* check if we want to generate more frames */ 413 | if (av_compare_ts(ost->next_pts, ost->st->codec->time_base, 414 | STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) 415 | return NULL; 416 | 417 | if (c->pix_fmt != AV_PIX_FMT_YUV420P) { 418 | /* as we only generate a YUV420P picture, we must convert it 419 | * * to the codec pixel format if needed */ 420 | if (!ost->sws_ctx) { 421 | ost->sws_ctx = sws_getContext(c->width, c->height, 422 | AV_PIX_FMT_YUV420P, 423 | c->width, c->height, 424 | c->pix_fmt, 425 | SCALE_FLAGS, NULL, NULL, NULL); 426 | if (!ost->sws_ctx) { 427 | fprintf(stderr, 428 | "Could not initialize the conversion context\n"); 429 | exit(1); 430 | } 431 | } 432 | fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height); 433 | sws_scale(ost->sws_ctx, 434 | (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize, 435 | 0, c->height, ost->frame->data, ost->frame->linesize); 436 | } else { 437 | fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height); 438 | } 439 | 440 | ost->frame->pts = ost->next_pts++; 441 | 442 | return ost->frame; 443 | } 444 | 445 | 446 | 447 | //encode one video frame and send it to the muxer 448 | //return 1 when encoding is finished , 0 otherwise 449 | static int write_video_frame(AVFormatContext *oc, OutputStream *ost) 450 | { 451 | int ret; 452 | AVCodecContext *c; 453 | AVFrame *frame; 454 | int got_packet = 0; 455 | 456 | c = ost->st->codec; 457 | 458 | frame = get_video_frame(ost); 459 | if(oc->oformat->flags & AVFMT_RAWPICTURE){ 460 | // a hack to avoid data copy width some raw video muxers 461 | AVPacket pkt; 462 | av_init_packet(&pkt); 463 | 464 | if(!frame) 465 | return 1; 466 | 467 | pkt.flags |= AV_PKT_FLAG_KEY; 468 | pkt.stream_index = ost->st->index; 469 | pkt.data = (uint8_t *)frame; 470 | pkt.size = sizeof(AVPicture); 471 | pkt.pts = pkt.dts = frame->pts; 472 | av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base); 473 | 474 | ret = av_interleaved_write_frame(oc, &pkt); 475 | }else{ 476 | AVPacket pkt = {0}; 477 | av_init_packet(&pkt); 478 | 479 | //encodec the image 480 | ret = avcodec_encode_video2(c, &pkt, frame, &got_packet); 481 | if(ret < 0){ 482 | fprintf(stderr, "Error encoding video frame\n"); 483 | exit(1); 484 | } 485 | 486 | if(got_packet){ 487 | ret = write_frame(oc, &c->time_base, ost->st, &pkt); 488 | }else{ 489 | ret = 0; 490 | } 491 | } 492 | 493 | if(ret < 0){ 494 | fprintf(stderr, "Error while writing video frame\n"); 495 | exit(1); 496 | } 497 | 498 | return (frame || got_packet)?0:1; 499 | } 500 | 501 | static void close_stream(AVFormatContext *oc, OutputStream *ost) 502 | { 503 | avcodec_close(ost->st->codec); 504 | av_frame_free(&ost->frame); 505 | av_frame_free(&ost->tmp_frame); 506 | sws_freeContext(ost->sws_ctx); 507 | swr_free(&ost->swr_ctx); 508 | } 509 | 510 | 511 | int main(int argc, char **argv) 512 | { 513 | OutputStream video_st = {0}; 514 | OutputStream audio_st = {0}; 515 | const char *filename; 516 | AVOutputFormat *fmt; 517 | AVFormatContext *oc; 518 | AVCodec *audio_codec; 519 | AVCodec *video_codec; 520 | int ret; 521 | int have_video = 0; 522 | int have_audio = 0; 523 | int encode_video = 0; 524 | int encode_audio = 0; 525 | AVDictionary *opt = NULL; 526 | 527 | av_register_all(); 528 | 529 | if (argc < 2) { 530 | printf("usage: %s output_file\n" 531 | "API example program to output a media file with libavformat.\n" 532 | "This program generates a synthetic audio and video stream, encodes and\n" 533 | "muxes them into a file named output_file.\n" 534 | "The output format is automatically guessed according to the file extension.\n" 535 | "Raw images can also be output by using '%%d' in the filename.\n" 536 | "\n", argv[0]); 537 | return 1; 538 | } 539 | 540 | filename = argv[1]; 541 | if (argc > 3 && !strcmp(argv[2], "-flags")) { 542 | av_dict_set(&opt, argv[2]+1, argv[3], 0); 543 | } 544 | 545 | filename = argv[1]; 546 | 547 | avformat_alloc_output_context2(&oc, NULL, NULL, filename); 548 | if(!oc){ 549 | printf("Could not deduce output format from file extension:using MPEG\n"); 550 | avformat_alloc_output_context2(&oc, NULL, "mpeg", filename); 551 | } 552 | if(!oc) 553 | return 1; 554 | 555 | fmt = oc->oformat; 556 | 557 | // Add the audio and video streams using the default format codecs 558 | // and initialize the codecs 559 | if(fmt->video_codec != AV_CODEC_ID_NONE){ 560 | add_stream(&video_st, oc, &video_codec, fmt->video_codec); 561 | have_video = 1; 562 | encode_video = 1; 563 | } 564 | 565 | if(fmt->audio_codec != AV_CODEC_ID_NONE){ 566 | add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec); 567 | have_audio = 1; 568 | encode_audio = 1; 569 | } 570 | 571 | //Now that all the parameters are set, 572 | //we can open the audio and video codecs and 573 | //allocate the necessary encode buffers 574 | if(have_video){ 575 | open_video(oc, video_codec, &video_st, opt); 576 | } 577 | 578 | if(have_audio){ 579 | open_audio(oc, audio_codec, &audio_st, opt); 580 | } 581 | 582 | av_dump_format(oc, 0, filename, 1); 583 | 584 | //open the output file, if needed 585 | if(!(fmt->flags & AVFMT_NOFILE)){ 586 | ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); 587 | if(ret < 0){ 588 | fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret)); 589 | return 1; 590 | } 591 | } 592 | 593 | //write the stream header, if any 594 | ret = avformat_write_header(oc, &opt); 595 | if(ret < 0){ 596 | fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret)); 597 | return 1; 598 | } 599 | 600 | while(encode_video || encode_audio){ 601 | //select the stream to encode 602 | if(encode_video && 603 | (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base, 604 | audio_st.next_pts, audio_st.st->codec->time_base) <= 0)){ 605 | encode_video = !write_video_frame(oc, &video_st); 606 | }else{ 607 | encode_audio = !write_audio_frame(oc, &audio_st); 608 | } 609 | } 610 | 611 | //write the trailer, if any. The trailer must be written before you 612 | //close the CodecContexts open when you wrote the header; 613 | //otherwise av_write_trailer() may try to use memory that was freed on 614 | //av_clodec_close() 615 | av_write_trailer(oc); 616 | 617 | //close each codec 618 | if(have_video) 619 | close_stream(oc, &video_st); 620 | if(have_audio) 621 | close_stream(oc, &audio_st); 622 | 623 | if(!(fmt->flags & AVFMT_NOFILE)) 624 | avio_closep(&oc->pb); 625 | 626 | //free the stream 627 | avformat_free_context(oc); 628 | 629 | return 0; 630 | } 631 | 632 | --------------------------------------------------------------------------------