├── .gitignore ├── Makefile ├── README.md ├── VERSION.txt ├── tutorial01.c ├── tutorial02.c ├── tutorial03.c ├── tutorial04.c ├── tutorial05.c ├── tutorial06.c └── tutorial07.c /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | 6 | # Compiled Dynamic libraries 7 | *.so 8 | 9 | # Compiled Static libraries 10 | *.lai 11 | *.la 12 | *.a 13 | 14 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # http://www.gnu.org/software/make/manual/make.html 3 | # 4 | CC:=gcc 5 | INCLUDES:=$(shell pkg-config --cflags libavformat libavcodec libswresample libswscale libavutil sdl) 6 | CFLAGS:=-Wall -ggdb 7 | LDFLAGS:=$(shell pkg-config --libs libavformat libavcodec libswresample libswscale libavutil sdl) -lm 8 | EXE:=tutorial01.out tutorial02.out tutorial03.out tutorial04.out\ 9 | tutorial05.out tutorial06.out tutorial07.out 10 | 11 | # 12 | # This is here to prevent Make from deleting secondary files. 13 | # 14 | .SECONDARY: 15 | 16 | 17 | # 18 | # $< is the first dependency in the dependency list 19 | # $@ is the target name 20 | # 21 | all: dirs $(addprefix bin/, $(EXE)) tags 22 | 23 | dirs: 24 | mkdir -p obj 25 | mkdir -p bin 26 | 27 | tags: *.c 28 | ctags *.c 29 | 30 | bin/%.out: obj/%.o 31 | $(CC) $(CFLAGS) $< $(LDFLAGS) -o $@ 32 | 33 | obj/%.o : %.c 34 | $(CC) $(CFLAGS) $< $(INCLUDES) -c -o $@ 35 | 36 | clean: 37 | rm -f obj/* 38 | rm -f bin/* 39 | rm -f tags 40 | 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ffmpeg-tutorial 2 | =============== 3 | 4 | * * * 5 | The [original tutorials](http://dranger.com/ffmpeg/) have now been [updated](https://ffmpeg.org/pipermail/libav-user/2015-February/007896.html). 
6 | I won't be maintaining this project anymore, and am keeping it here for historical reasons. 7 | * * * 8 | 9 | This repository contains files from an FFmpeg tutorial originally written by 10 | Stephen Dranger (dranger@gmail.com). The files have been updated to work with 11 | the most recent version of FFmpeg (see VERSION.txt for the most recent version 12 | at the time of writing). 13 | The updates were performed with an effort to modify as little code as possible, 14 | so that the original code and tutorial descriptions could be easily consulted. 15 | 16 | The code from the original tutorial and the accompanying description is located 17 | [here](http://dranger.com/ffmpeg/). 18 | 19 | Main changes 20 | ------------ 21 | 22 | * Renamed includes, e.g. ffmpeg/avcodec.h --> libavcodec/avcodec.h 23 | * Work around deprecated functions and symbols (see below) 24 | * Initializing pointers to NULL on declaration. Some FFmpeg functions 25 | (e.g. avformat\_open\_input) now segfault when given uninitialized pointers as 26 | input. 27 | * Removed tutorial08.c, which introduced software scaling (as 28 | opposed to using the img\_convert method). img\_convert has been deprecated 29 | and is no longer available, so these new tutorials use software scaling 30 | from the very beginning, and a separate tutorial is not necessary. 31 | 32 | Deprecated Functions and Symbols 33 | -------------------------------- 34 | 35 | This section describes the changes made to work around deprecated functions 36 | and symbols, in the format: before --> after. In some cases, a simple rename 37 | sufficed (e.g. dump\_format), but in others, more significant changes to the 38 | code were required (e.g. avcodec\_decode\_audio2). Consult the diffs for each 39 | respective tutorial to see exactly what has changed since the original 40 | version of the tutorial. 
41 | 42 | * av\_open\_input\_file --> avformat\_open\_input 43 | * av\_find\_stream\_info --> avformat\_find\_stream\_info 44 | * dump\_format --> av\_dump\_format 45 | * CODEC\_TYPE\_VIDEO --> AVMEDIA\_TYPE\_VIDEO 46 | * avcodec\_open --> avcodec\_open2 47 | * avcodec\_decode\_video --> avcodec\_decode\_video2 48 | * img\_convert --> sws\_scale 49 | * av\_close\_input\_file --> avformat\_close\_input 50 | * avcodec\_decode\_audio2 --> avcodec\_decode\_audio4 51 | * CODEC\_TYPE\_AUDIO --> AVMEDIA\_TYPE\_AUDIO 52 | * url\_set\_interrupt\_cb --> avio\_open2 53 | * url\_ferror --> check attribute is->pFormatCtx->pb->error 54 | * pstrcpy --> av\_strlcpy 55 | 56 | Building and Running 57 | -------------------- 58 | 59 | First, make sure you have a recent installation of FFmpeg. It's recommended 60 | that you build FFmpeg from source as described in 61 | [this link](https://ffmpeg.org/trac/ffmpeg/wiki/UbuntuCompilationGuide). 62 | 63 | To build the tutorials: 64 | 65 | git clone git@github.com:chelyaev/ffmpeg-tutorial.git 66 | cd ffmpeg-tutorial 67 | make 68 | 69 | To run a tutorial, first make sure that your ffmpeg installation is on your 70 | $LD\_LIBRARY\_PATH and then: 71 | 72 | bin/tutorial01.out 73 | -------------------------------------------------------------------------------- /VERSION.txt: -------------------------------------------------------------------------------- 1 | N-50314-gf6fff8e 2 | 3 | -------------------------------------------------------------------------------- /tutorial01.c: -------------------------------------------------------------------------------- 1 | // tutorial01.c 2 | // 3 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 4 | // 5 | // Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 6 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 7 | 8 | // A small sample program that shows how to use libavformat and libavcodec to 9 | // read video from a file. 
10 | // 11 | // Use the Makefile to build all examples. 12 | // 13 | // Run using 14 | // 15 | // tutorial01 myvideofile.mpg 16 | // 17 | // to write the first five frames from "myvideofile.mpg" to disk in PPM 18 | // format. 19 | 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | 26 | void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) { 27 | FILE *pFile; 28 | char szFilename[32]; 29 | int y; 30 | 31 | // Open file 32 | sprintf(szFilename, "frame%d.ppm", iFrame); 33 | pFile=fopen(szFilename, "wb"); 34 | if(pFile==NULL) 35 | return; 36 | 37 | // Write header 38 | fprintf(pFile, "P6\n%d %d\n255\n", width, height); 39 | 40 | // Write pixel data 41 | for(y=0; ydata[0]+y*pFrame->linesize[0], 1, width*3, pFile); 43 | 44 | // Close file 45 | fclose(pFile); 46 | } 47 | 48 | int main(int argc, char *argv[]) { 49 | AVFormatContext *pFormatCtx = NULL; 50 | int i, videoStream; 51 | AVCodecContext *pCodecCtx = NULL; 52 | AVCodec *pCodec = NULL; 53 | AVFrame *pFrame = NULL; 54 | AVFrame *pFrameRGB = NULL; 55 | AVPacket packet; 56 | int frameFinished; 57 | int numBytes; 58 | uint8_t *buffer = NULL; 59 | 60 | AVDictionary *optionsDict = NULL; 61 | struct SwsContext *sws_ctx = NULL; 62 | 63 | if(argc < 2) { 64 | printf("Please provide a movie file\n"); 65 | return -1; 66 | } 67 | // Register all formats and codecs 68 | av_register_all(); 69 | 70 | // Open video file 71 | if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) 72 | return -1; // Couldn't open file 73 | 74 | // Retrieve stream information 75 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 76 | return -1; // Couldn't find stream information 77 | 78 | // Dump information about file onto standard error 79 | av_dump_format(pFormatCtx, 0, argv[1], 0); 80 | 81 | // Find the first video stream 82 | videoStream=-1; 83 | for(i=0; inb_streams; i++) 84 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { 85 | videoStream=i; 86 | break; 87 | } 88 | if(videoStream==-1) 89 | 
return -1; // Didn't find a video stream 90 | 91 | // Get a pointer to the codec context for the video stream 92 | pCodecCtx=pFormatCtx->streams[videoStream]->codec; 93 | 94 | // Find the decoder for the video stream 95 | pCodec=avcodec_find_decoder(pCodecCtx->codec_id); 96 | if(pCodec==NULL) { 97 | fprintf(stderr, "Unsupported codec!\n"); 98 | return -1; // Codec not found 99 | } 100 | // Open codec 101 | if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0) 102 | return -1; // Could not open codec 103 | 104 | // Allocate video frame 105 | pFrame=av_frame_alloc(); 106 | 107 | // Allocate an AVFrame structure 108 | pFrameRGB=av_frame_alloc(); 109 | if(pFrameRGB==NULL) 110 | return -1; 111 | 112 | // Determine required buffer size and allocate buffer 113 | numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, 114 | pCodecCtx->height); 115 | buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); 116 | 117 | sws_ctx = 118 | sws_getContext 119 | ( 120 | pCodecCtx->width, 121 | pCodecCtx->height, 122 | pCodecCtx->pix_fmt, 123 | pCodecCtx->width, 124 | pCodecCtx->height, 125 | PIX_FMT_RGB24, 126 | SWS_BILINEAR, 127 | NULL, 128 | NULL, 129 | NULL 130 | ); 131 | 132 | // Assign appropriate parts of buffer to image planes in pFrameRGB 133 | // Note that pFrameRGB is an AVFrame, but AVFrame is a superset 134 | // of AVPicture 135 | avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, 136 | pCodecCtx->width, pCodecCtx->height); 137 | 138 | // Read frames and save first five frames to disk 139 | i=0; 140 | while(av_read_frame(pFormatCtx, &packet)>=0) { 141 | // Is this a packet from the video stream? 142 | if(packet.stream_index==videoStream) { 143 | // Decode video frame 144 | avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 145 | &packet); 146 | 147 | // Did we get a video frame? 
148 | if(frameFinished) { 149 | // Convert the image from its native format to RGB 150 | sws_scale 151 | ( 152 | sws_ctx, 153 | (uint8_t const * const *)pFrame->data, 154 | pFrame->linesize, 155 | 0, 156 | pCodecCtx->height, 157 | pFrameRGB->data, 158 | pFrameRGB->linesize 159 | ); 160 | 161 | // Save the frame to disk 162 | if(++i<=5) 163 | SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 164 | i); 165 | } 166 | } 167 | 168 | // Free the packet that was allocated by av_read_frame 169 | av_free_packet(&packet); 170 | } 171 | 172 | // Free the RGB image 173 | av_free(buffer); 174 | av_free(pFrameRGB); 175 | 176 | // Free the YUV frame 177 | av_free(pFrame); 178 | 179 | // Close the codec 180 | avcodec_close(pCodecCtx); 181 | 182 | // Close the video file 183 | avformat_close_input(&pFormatCtx); 184 | 185 | return 0; 186 | } 187 | -------------------------------------------------------------------------------- /tutorial02.c: -------------------------------------------------------------------------------- 1 | // tutorial02.c 2 | // A pedagogical video player that will stream through every video frame as fast as it can. 3 | // 4 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 5 | // 6 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 7 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 8 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 9 | // 10 | // Use the Makefile to build all examples. 11 | // 12 | // Run using 13 | // tutorial02 myvideofile.mpg 14 | // 15 | // to play the video stream on your screen. 
16 | 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | 25 | #ifdef __MINGW32__ 26 | #undef main /* Prevents SDL from overriding main() */ 27 | #endif 28 | 29 | #include 30 | 31 | int main(int argc, char *argv[]) { 32 | AVFormatContext *pFormatCtx = NULL; 33 | int i, videoStream; 34 | AVCodecContext *pCodecCtx = NULL; 35 | AVCodec *pCodec = NULL; 36 | AVFrame *pFrame = NULL; 37 | AVPacket packet; 38 | int frameFinished; 39 | //float aspect_ratio; 40 | 41 | AVDictionary *optionsDict = NULL; 42 | struct SwsContext *sws_ctx = NULL; 43 | 44 | SDL_Overlay *bmp = NULL; 45 | SDL_Surface *screen = NULL; 46 | SDL_Rect rect; 47 | SDL_Event event; 48 | 49 | if(argc < 2) { 50 | fprintf(stderr, "Usage: test \n"); 51 | exit(1); 52 | } 53 | // Register all formats and codecs 54 | av_register_all(); 55 | 56 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 57 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 58 | exit(1); 59 | } 60 | 61 | // Open video file 62 | if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) 63 | return -1; // Couldn't open file 64 | 65 | // Retrieve stream information 66 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 67 | return -1; // Couldn't find stream information 68 | 69 | // Dump information about file onto standard error 70 | av_dump_format(pFormatCtx, 0, argv[1], 0); 71 | 72 | // Find the first video stream 73 | videoStream=-1; 74 | for(i=0; inb_streams; i++) 75 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { 76 | videoStream=i; 77 | break; 78 | } 79 | if(videoStream==-1) 80 | return -1; // Didn't find a video stream 81 | 82 | // Get a pointer to the codec context for the video stream 83 | pCodecCtx=pFormatCtx->streams[videoStream]->codec; 84 | 85 | // Find the decoder for the video stream 86 | pCodec=avcodec_find_decoder(pCodecCtx->codec_id); 87 | if(pCodec==NULL) { 88 | fprintf(stderr, "Unsupported codec!\n"); 89 | return -1; // Codec not 
found 90 | } 91 | 92 | // Open codec 93 | if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0) 94 | return -1; // Could not open codec 95 | 96 | // Allocate video frame 97 | pFrame=av_frame_alloc(); 98 | 99 | // Make a screen to put our video 100 | #ifndef __DARWIN__ 101 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); 102 | #else 103 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); 104 | #endif 105 | if(!screen) { 106 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 107 | exit(1); 108 | } 109 | 110 | // Allocate a place to put our YUV image on that screen 111 | bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 112 | pCodecCtx->height, 113 | SDL_YV12_OVERLAY, 114 | screen); 115 | 116 | sws_ctx = 117 | sws_getContext 118 | ( 119 | pCodecCtx->width, 120 | pCodecCtx->height, 121 | pCodecCtx->pix_fmt, 122 | pCodecCtx->width, 123 | pCodecCtx->height, 124 | PIX_FMT_YUV420P, 125 | SWS_BILINEAR, 126 | NULL, 127 | NULL, 128 | NULL 129 | ); 130 | 131 | // Read frames and save first five frames to disk 132 | i=0; 133 | while(av_read_frame(pFormatCtx, &packet)>=0) { 134 | // Is this a packet from the video stream? 135 | if(packet.stream_index==videoStream) { 136 | // Decode video frame 137 | avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 138 | &packet); 139 | 140 | // Did we get a video frame? 
141 | if(frameFinished) { 142 | SDL_LockYUVOverlay(bmp); 143 | 144 | AVPicture pict; 145 | pict.data[0] = bmp->pixels[0]; 146 | pict.data[1] = bmp->pixels[2]; 147 | pict.data[2] = bmp->pixels[1]; 148 | 149 | pict.linesize[0] = bmp->pitches[0]; 150 | pict.linesize[1] = bmp->pitches[2]; 151 | pict.linesize[2] = bmp->pitches[1]; 152 | 153 | // Convert the image into YUV format that SDL uses 154 | sws_scale 155 | ( 156 | sws_ctx, 157 | (uint8_t const * const *)pFrame->data, 158 | pFrame->linesize, 159 | 0, 160 | pCodecCtx->height, 161 | pict.data, 162 | pict.linesize 163 | ); 164 | 165 | SDL_UnlockYUVOverlay(bmp); 166 | 167 | rect.x = 0; 168 | rect.y = 0; 169 | rect.w = pCodecCtx->width; 170 | rect.h = pCodecCtx->height; 171 | SDL_DisplayYUVOverlay(bmp, &rect); 172 | 173 | } 174 | } 175 | 176 | // Free the packet that was allocated by av_read_frame 177 | av_free_packet(&packet); 178 | SDL_PollEvent(&event); 179 | switch(event.type) { 180 | case SDL_QUIT: 181 | SDL_Quit(); 182 | exit(0); 183 | break; 184 | default: 185 | break; 186 | } 187 | 188 | } 189 | 190 | // Free the YUV frame 191 | av_free(pFrame); 192 | 193 | // Close the codec 194 | avcodec_close(pCodecCtx); 195 | 196 | // Close the video file 197 | avformat_close_input(&pFormatCtx); 198 | 199 | return 0; 200 | } 201 | -------------------------------------------------------------------------------- /tutorial03.c: -------------------------------------------------------------------------------- 1 | // tutorial03.c 2 | // A pedagogical video player that will stream through every video frame as fast as it can 3 | // and play audio (out of sync). 4 | // 5 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 6 | // 7 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 8 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 9 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 10 | // 11 | // Use the Makefile to build all examples. 
12 | // 13 | // Run using 14 | // tutorial03 myvideofile.mpg 15 | // 16 | // to play the stream on your screen. 17 | 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #include 24 | #include 25 | 26 | #ifdef __MINGW32__ 27 | #undef main /* Prevents SDL from overriding main() */ 28 | #endif 29 | 30 | #include 31 | 32 | #define SDL_AUDIO_BUFFER_SIZE 1024 33 | #define MAX_AUDIO_FRAME_SIZE 192000 34 | 35 | typedef struct PacketQueue { 36 | AVPacketList *first_pkt, *last_pkt; 37 | int nb_packets; 38 | int size; 39 | SDL_mutex *mutex; 40 | SDL_cond *cond; 41 | } PacketQueue; 42 | 43 | PacketQueue audioq; 44 | 45 | int quit = 0; 46 | 47 | void packet_queue_init(PacketQueue *q) { 48 | memset(q, 0, sizeof(PacketQueue)); 49 | q->mutex = SDL_CreateMutex(); 50 | q->cond = SDL_CreateCond(); 51 | } 52 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 53 | 54 | AVPacketList *pkt1; 55 | if(av_dup_packet(pkt) < 0) { 56 | return -1; 57 | } 58 | pkt1 = av_malloc(sizeof(AVPacketList)); 59 | if (!pkt1) 60 | return -1; 61 | pkt1->pkt = *pkt; 62 | pkt1->next = NULL; 63 | 64 | 65 | SDL_LockMutex(q->mutex); 66 | 67 | if (!q->last_pkt) 68 | q->first_pkt = pkt1; 69 | else 70 | q->last_pkt->next = pkt1; 71 | q->last_pkt = pkt1; 72 | q->nb_packets++; 73 | q->size += pkt1->pkt.size; 74 | SDL_CondSignal(q->cond); 75 | 76 | SDL_UnlockMutex(q->mutex); 77 | return 0; 78 | } 79 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 80 | { 81 | AVPacketList *pkt1; 82 | int ret; 83 | 84 | SDL_LockMutex(q->mutex); 85 | 86 | for(;;) { 87 | 88 | if(quit) { 89 | ret = -1; 90 | break; 91 | } 92 | 93 | pkt1 = q->first_pkt; 94 | if (pkt1) { 95 | q->first_pkt = pkt1->next; 96 | if (!q->first_pkt) 97 | q->last_pkt = NULL; 98 | q->nb_packets--; 99 | q->size -= pkt1->pkt.size; 100 | *pkt = pkt1->pkt; 101 | av_free(pkt1); 102 | ret = 1; 103 | break; 104 | } else if (!block) { 105 | ret = 0; 106 | break; 107 | } else { 108 | SDL_CondWait(q->cond, q->mutex); 109 | } 110 | } 111 | 
SDL_UnlockMutex(q->mutex); 112 | return ret; 113 | } 114 | 115 | int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) { 116 | 117 | static AVPacket pkt; 118 | static uint8_t *audio_pkt_data = NULL; 119 | static int audio_pkt_size = 0; 120 | static AVFrame frame; 121 | 122 | int len1, data_size = 0; 123 | 124 | for(;;) { 125 | while(audio_pkt_size > 0) { 126 | int got_frame = 0; 127 | len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt); 128 | if(len1 < 0) { 129 | /* if error, skip frame */ 130 | audio_pkt_size = 0; 131 | break; 132 | } 133 | audio_pkt_data += len1; 134 | audio_pkt_size -= len1; 135 | if (got_frame) 136 | { 137 | data_size = 138 | av_samples_get_buffer_size 139 | ( 140 | NULL, 141 | aCodecCtx->channels, 142 | frame.nb_samples, 143 | aCodecCtx->sample_fmt, 144 | 1 145 | ); 146 | memcpy(audio_buf, frame.data[0], data_size); 147 | } 148 | if(data_size <= 0) { 149 | /* No data yet, get more frames */ 150 | continue; 151 | } 152 | /* We have data, return it and come back for more later */ 153 | return data_size; 154 | } 155 | if(pkt.data) 156 | av_free_packet(&pkt); 157 | 158 | if(quit) { 159 | return -1; 160 | } 161 | 162 | if(packet_queue_get(&audioq, &pkt, 1) < 0) { 163 | return -1; 164 | } 165 | audio_pkt_data = pkt.data; 166 | audio_pkt_size = pkt.size; 167 | } 168 | } 169 | 170 | void audio_callback(void *userdata, Uint8 *stream, int len) { 171 | 172 | AVCodecContext *aCodecCtx = (AVCodecContext *)userdata; 173 | int len1, audio_size; 174 | 175 | static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; 176 | static unsigned int audio_buf_size = 0; 177 | static unsigned int audio_buf_index = 0; 178 | 179 | while(len > 0) { 180 | if(audio_buf_index >= audio_buf_size) { 181 | /* We have already sent all our data; get more */ 182 | audio_size = audio_decode_frame(aCodecCtx, audio_buf, audio_buf_size); 183 | if(audio_size < 0) { 184 | /* If error, output silence */ 185 | audio_buf_size = 1024; // arbitrary? 
186 | memset(audio_buf, 0, audio_buf_size); 187 | } else { 188 | audio_buf_size = audio_size; 189 | } 190 | audio_buf_index = 0; 191 | } 192 | len1 = audio_buf_size - audio_buf_index; 193 | if(len1 > len) 194 | len1 = len; 195 | memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1); 196 | len -= len1; 197 | stream += len1; 198 | audio_buf_index += len1; 199 | } 200 | } 201 | 202 | int main(int argc, char *argv[]) { 203 | AVFormatContext *pFormatCtx = NULL; 204 | int i, videoStream, audioStream; 205 | AVCodecContext *pCodecCtx = NULL; 206 | AVCodec *pCodec = NULL; 207 | AVFrame *pFrame = NULL; 208 | AVPacket packet; 209 | int frameFinished; 210 | //float aspect_ratio; 211 | 212 | AVCodecContext *aCodecCtx = NULL; 213 | AVCodec *aCodec = NULL; 214 | 215 | SDL_Overlay *bmp = NULL; 216 | SDL_Surface *screen = NULL; 217 | SDL_Rect rect; 218 | SDL_Event event; 219 | SDL_AudioSpec wanted_spec, spec; 220 | 221 | struct SwsContext *sws_ctx = NULL; 222 | AVDictionary *videoOptionsDict = NULL; 223 | AVDictionary *audioOptionsDict = NULL; 224 | 225 | if(argc < 2) { 226 | fprintf(stderr, "Usage: test \n"); 227 | exit(1); 228 | } 229 | // Register all formats and codecs 230 | av_register_all(); 231 | 232 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 233 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 234 | exit(1); 235 | } 236 | 237 | // Open video file 238 | if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) 239 | return -1; // Couldn't open file 240 | 241 | // Retrieve stream information 242 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 243 | return -1; // Couldn't find stream information 244 | 245 | // Dump information about file onto standard error 246 | av_dump_format(pFormatCtx, 0, argv[1], 0); 247 | 248 | // Find the first video stream 249 | videoStream=-1; 250 | audioStream=-1; 251 | for(i=0; inb_streams; i++) { 252 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 253 | videoStream < 
0) { 254 | videoStream=i; 255 | } 256 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 257 | audioStream < 0) { 258 | audioStream=i; 259 | } 260 | } 261 | if(videoStream==-1) 262 | return -1; // Didn't find a video stream 263 | if(audioStream==-1) 264 | return -1; 265 | 266 | aCodecCtx=pFormatCtx->streams[audioStream]->codec; 267 | // Set audio settings from codec info 268 | wanted_spec.freq = aCodecCtx->sample_rate; 269 | wanted_spec.format = AUDIO_S16SYS; 270 | wanted_spec.channels = aCodecCtx->channels; 271 | wanted_spec.silence = 0; 272 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 273 | wanted_spec.callback = audio_callback; 274 | wanted_spec.userdata = aCodecCtx; 275 | 276 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 277 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 278 | return -1; 279 | } 280 | aCodec = avcodec_find_decoder(aCodecCtx->codec_id); 281 | if(!aCodec) { 282 | fprintf(stderr, "Unsupported codec!\n"); 283 | return -1; 284 | } 285 | avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict); 286 | 287 | // audio_st = pFormatCtx->streams[index] 288 | packet_queue_init(&audioq); 289 | SDL_PauseAudio(0); 290 | 291 | // Get a pointer to the codec context for the video stream 292 | pCodecCtx=pFormatCtx->streams[videoStream]->codec; 293 | 294 | // Find the decoder for the video stream 295 | pCodec=avcodec_find_decoder(pCodecCtx->codec_id); 296 | if(pCodec==NULL) { 297 | fprintf(stderr, "Unsupported codec!\n"); 298 | return -1; // Codec not found 299 | } 300 | // Open codec 301 | if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0) 302 | return -1; // Could not open codec 303 | 304 | // Allocate video frame 305 | pFrame=av_frame_alloc(); 306 | 307 | // Make a screen to put our video 308 | 309 | #ifndef __DARWIN__ 310 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); 311 | #else 312 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); 313 | #endif 314 | if(!screen) { 315 | 
fprintf(stderr, "SDL: could not set video mode - exiting\n"); 316 | exit(1); 317 | } 318 | 319 | // Allocate a place to put our YUV image on that screen 320 | bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 321 | pCodecCtx->height, 322 | SDL_YV12_OVERLAY, 323 | screen); 324 | sws_ctx = 325 | sws_getContext 326 | ( 327 | pCodecCtx->width, 328 | pCodecCtx->height, 329 | pCodecCtx->pix_fmt, 330 | pCodecCtx->width, 331 | pCodecCtx->height, 332 | PIX_FMT_YUV420P, 333 | SWS_BILINEAR, 334 | NULL, 335 | NULL, 336 | NULL 337 | ); 338 | 339 | 340 | // Read frames and save first five frames to disk 341 | i=0; 342 | while(av_read_frame(pFormatCtx, &packet)>=0) { 343 | // Is this a packet from the video stream? 344 | if(packet.stream_index==videoStream) { 345 | // Decode video frame 346 | avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 347 | &packet); 348 | 349 | // Did we get a video frame? 350 | if(frameFinished) { 351 | SDL_LockYUVOverlay(bmp); 352 | 353 | AVPicture pict; 354 | pict.data[0] = bmp->pixels[0]; 355 | pict.data[1] = bmp->pixels[2]; 356 | pict.data[2] = bmp->pixels[1]; 357 | 358 | pict.linesize[0] = bmp->pitches[0]; 359 | pict.linesize[1] = bmp->pitches[2]; 360 | pict.linesize[2] = bmp->pitches[1]; 361 | 362 | // Convert the image into YUV format that SDL uses 363 | sws_scale 364 | ( 365 | sws_ctx, 366 | (uint8_t const * const *)pFrame->data, 367 | pFrame->linesize, 368 | 0, 369 | pCodecCtx->height, 370 | pict.data, 371 | pict.linesize 372 | ); 373 | 374 | SDL_UnlockYUVOverlay(bmp); 375 | 376 | rect.x = 0; 377 | rect.y = 0; 378 | rect.w = pCodecCtx->width; 379 | rect.h = pCodecCtx->height; 380 | SDL_DisplayYUVOverlay(bmp, &rect); 381 | av_free_packet(&packet); 382 | } 383 | } else if(packet.stream_index==audioStream) { 384 | packet_queue_put(&audioq, &packet); 385 | } else { 386 | av_free_packet(&packet); 387 | } 388 | // Free the packet that was allocated by av_read_frame 389 | SDL_PollEvent(&event); 390 | switch(event.type) { 391 | case SDL_QUIT: 392 | 
quit = 1; 393 | SDL_Quit(); 394 | exit(0); 395 | break; 396 | default: 397 | break; 398 | } 399 | 400 | } 401 | 402 | // Free the YUV frame 403 | av_free(pFrame); 404 | 405 | // Close the codec 406 | avcodec_close(pCodecCtx); 407 | 408 | // Close the video file 409 | avformat_close_input(&pFormatCtx); 410 | 411 | return 0; 412 | } 413 | -------------------------------------------------------------------------------- /tutorial04.c: -------------------------------------------------------------------------------- 1 | // tutorial04.c 2 | // A pedagogical video player that will stream through every video frame as fast as it can, 3 | // and play audio (out of sync). 4 | // 5 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 6 | // 7 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 8 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 9 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 10 | // 11 | // Use the Makefile to build all the samples. 12 | // 13 | // Run using 14 | // tutorial04 myvideofile.mpg 15 | // 16 | // to play the video stream on your screen. 
17 | 18 | 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #include 27 | #include 28 | 29 | #ifdef __MINGW32__ 30 | #undef main /* Prevents SDL from overriding main() */ 31 | #endif 32 | 33 | #include 34 | #include 35 | 36 | #define SDL_AUDIO_BUFFER_SIZE 1024 37 | #define MAX_AUDIO_FRAME_SIZE 192000 38 | 39 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 40 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 41 | 42 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 43 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 44 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 45 | 46 | #define VIDEO_PICTURE_QUEUE_SIZE 1 47 | 48 | typedef struct PacketQueue { 49 | AVPacketList *first_pkt, *last_pkt; 50 | int nb_packets; 51 | int size; 52 | SDL_mutex *mutex; 53 | SDL_cond *cond; 54 | } PacketQueue; 55 | 56 | 57 | typedef struct VideoPicture { 58 | SDL_Overlay *bmp; 59 | int width, height; /* source height & width */ 60 | int allocated; 61 | } VideoPicture; 62 | 63 | typedef struct VideoState { 64 | 65 | AVFormatContext *pFormatCtx; 66 | int videoStream, audioStream; 67 | AVStream *audio_st; 68 | PacketQueue audioq; 69 | uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; 70 | unsigned int audio_buf_size; 71 | unsigned int audio_buf_index; 72 | AVFrame audio_frame; 73 | AVPacket audio_pkt; 74 | uint8_t *audio_pkt_data; 75 | int audio_pkt_size; 76 | AVStream *video_st; 77 | PacketQueue videoq; 78 | 79 | VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; 80 | int pictq_size, pictq_rindex, pictq_windex; 81 | SDL_mutex *pictq_mutex; 82 | SDL_cond *pictq_cond; 83 | 84 | SDL_Thread *parse_tid; 85 | SDL_Thread *video_tid; 86 | 87 | char filename[1024]; 88 | int quit; 89 | 90 | AVIOContext *io_context; 91 | struct SwsContext *sws_ctx; 92 | } VideoState; 93 | 94 | SDL_Surface *screen; 95 | 96 | /* Since we only have one decoding thread, the Big Struct 97 | can be global in case we need it. 
*/ 98 | VideoState *global_video_state; 99 | 100 | void packet_queue_init(PacketQueue *q) { 101 | memset(q, 0, sizeof(PacketQueue)); 102 | q->mutex = SDL_CreateMutex(); 103 | q->cond = SDL_CreateCond(); 104 | } 105 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 106 | 107 | AVPacketList *pkt1; 108 | if(av_dup_packet(pkt) < 0) { 109 | return -1; 110 | } 111 | pkt1 = av_malloc(sizeof(AVPacketList)); 112 | if (!pkt1) 113 | return -1; 114 | pkt1->pkt = *pkt; 115 | pkt1->next = NULL; 116 | 117 | SDL_LockMutex(q->mutex); 118 | 119 | if (!q->last_pkt) 120 | q->first_pkt = pkt1; 121 | else 122 | q->last_pkt->next = pkt1; 123 | q->last_pkt = pkt1; 124 | q->nb_packets++; 125 | q->size += pkt1->pkt.size; 126 | SDL_CondSignal(q->cond); 127 | 128 | SDL_UnlockMutex(q->mutex); 129 | return 0; 130 | } 131 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 132 | { 133 | AVPacketList *pkt1; 134 | int ret; 135 | 136 | SDL_LockMutex(q->mutex); 137 | 138 | for(;;) { 139 | 140 | if(global_video_state->quit) { 141 | ret = -1; 142 | break; 143 | } 144 | 145 | pkt1 = q->first_pkt; 146 | if (pkt1) { 147 | q->first_pkt = pkt1->next; 148 | if (!q->first_pkt) 149 | q->last_pkt = NULL; 150 | q->nb_packets--; 151 | q->size -= pkt1->pkt.size; 152 | *pkt = pkt1->pkt; 153 | av_free(pkt1); 154 | ret = 1; 155 | break; 156 | } else if (!block) { 157 | ret = 0; 158 | break; 159 | } else { 160 | SDL_CondWait(q->cond, q->mutex); 161 | } 162 | } 163 | SDL_UnlockMutex(q->mutex); 164 | return ret; 165 | } 166 | 167 | int audio_decode_frame(VideoState *is) { 168 | int len1, data_size = 0; 169 | AVPacket *pkt = &is->audio_pkt; 170 | 171 | for(;;) { 172 | while(is->audio_pkt_size > 0) { 173 | int got_frame = 0; 174 | len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt); 175 | if(len1 < 0) { 176 | /* if error, skip frame */ 177 | is->audio_pkt_size = 0; 178 | break; 179 | } 180 | if (got_frame) 181 | { 182 | data_size = 183 | 
av_samples_get_buffer_size 184 | ( 185 | NULL, 186 | is->audio_st->codec->channels, 187 | is->audio_frame.nb_samples, 188 | is->audio_st->codec->sample_fmt, 189 | 1 190 | ); 191 | memcpy(is->audio_buf, is->audio_frame.data[0], data_size); 192 | } 193 | is->audio_pkt_data += len1; 194 | is->audio_pkt_size -= len1; 195 | if(data_size <= 0) { 196 | /* No data yet, get more frames */ 197 | continue; 198 | } 199 | /* We have data, return it and come back for more later */ 200 | return data_size; 201 | } 202 | if(pkt->data) 203 | av_free_packet(pkt); 204 | 205 | if(is->quit) { 206 | return -1; 207 | } 208 | /* next packet */ 209 | if(packet_queue_get(&is->audioq, pkt, 1) < 0) { 210 | return -1; 211 | } 212 | is->audio_pkt_data = pkt->data; 213 | is->audio_pkt_size = pkt->size; 214 | } 215 | } 216 | 217 | void audio_callback(void *userdata, Uint8 *stream, int len) { 218 | 219 | VideoState *is = (VideoState *)userdata; 220 | int len1, audio_size; 221 | 222 | while(len > 0) { 223 | if(is->audio_buf_index >= is->audio_buf_size) { 224 | /* We have already sent all our data; get more */ 225 | audio_size = audio_decode_frame(is); 226 | if(audio_size < 0) { 227 | /* If error, output silence */ 228 | is->audio_buf_size = 1024; 229 | memset(is->audio_buf, 0, is->audio_buf_size); 230 | } else { 231 | is->audio_buf_size = audio_size; 232 | } 233 | is->audio_buf_index = 0; 234 | } 235 | len1 = is->audio_buf_size - is->audio_buf_index; 236 | if(len1 > len) 237 | len1 = len; 238 | memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); 239 | len -= len1; 240 | stream += len1; 241 | is->audio_buf_index += len1; 242 | } 243 | } 244 | 245 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 246 | SDL_Event event; 247 | event.type = FF_REFRESH_EVENT; 248 | event.user.data1 = opaque; 249 | SDL_PushEvent(&event); 250 | return 0; /* 0 means stop timer */ 251 | } 252 | 253 | /* schedule a video refresh in 'delay' ms */ 254 | static void 
schedule_refresh(VideoState *is, int delay) { 255 | SDL_AddTimer(delay, sdl_refresh_timer_cb, is); 256 | } 257 | 258 | void video_display(VideoState *is) { 259 | 260 | SDL_Rect rect; 261 | VideoPicture *vp; 262 | //AVPicture pict; 263 | float aspect_ratio; 264 | int w, h, x, y; 265 | //int i; 266 | 267 | vp = &is->pictq[is->pictq_rindex]; 268 | if(vp->bmp) { 269 | if(is->video_st->codec->sample_aspect_ratio.num == 0) { 270 | aspect_ratio = 0; 271 | } else { 272 | aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) * 273 | is->video_st->codec->width / is->video_st->codec->height; 274 | } 275 | if(aspect_ratio <= 0.0) { 276 | aspect_ratio = (float)is->video_st->codec->width / 277 | (float)is->video_st->codec->height; 278 | } 279 | h = screen->h; 280 | w = ((int)rint(h * aspect_ratio)) & -3; 281 | if(w > screen->w) { 282 | w = screen->w; 283 | h = ((int)rint(w / aspect_ratio)) & -3; 284 | } 285 | x = (screen->w - w) / 2; 286 | y = (screen->h - h) / 2; 287 | 288 | rect.x = x; 289 | rect.y = y; 290 | rect.w = w; 291 | rect.h = h; 292 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 293 | } 294 | } 295 | 296 | void video_refresh_timer(void *userdata) { 297 | 298 | VideoState *is = (VideoState *)userdata; 299 | // vp is used in later tutorials for synchronization 300 | //VideoPicture *vp; 301 | 302 | if(is->video_st) { 303 | if(is->pictq_size == 0) { 304 | schedule_refresh(is, 1); 305 | } else { 306 | //vp = &is->pictq[is->pictq_rindex]; 307 | /* Now, normally here goes a ton of code 308 | about timing, etc. we're just going to 309 | guess at a delay for now. You can 310 | increase and decrease this value and hard code 311 | the timing - but I don't suggest that ;) 312 | We'll learn how to do it for real later. 313 | */ 314 | schedule_refresh(is, 80); 315 | 316 | /* show the picture! */ 317 | video_display(is); 318 | 319 | /* update queue for next picture! 
*/ 320 | if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 321 | is->pictq_rindex = 0; 322 | } 323 | SDL_LockMutex(is->pictq_mutex); 324 | is->pictq_size--; 325 | SDL_CondSignal(is->pictq_cond); 326 | SDL_UnlockMutex(is->pictq_mutex); 327 | } 328 | } else { 329 | schedule_refresh(is, 100); 330 | } 331 | } 332 | 333 | void alloc_picture(void *userdata) { 334 | 335 | VideoState *is = (VideoState *)userdata; 336 | VideoPicture *vp; 337 | 338 | vp = &is->pictq[is->pictq_windex]; 339 | if(vp->bmp) { 340 | // we already have one make another, bigger/smaller 341 | SDL_FreeYUVOverlay(vp->bmp); 342 | } 343 | // Allocate a place to put our YUV image on that screen 344 | vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, 345 | is->video_st->codec->height, 346 | SDL_YV12_OVERLAY, 347 | screen); 348 | vp->width = is->video_st->codec->width; 349 | vp->height = is->video_st->codec->height; 350 | 351 | SDL_LockMutex(is->pictq_mutex); 352 | vp->allocated = 1; 353 | SDL_CondSignal(is->pictq_cond); 354 | SDL_UnlockMutex(is->pictq_mutex); 355 | 356 | } 357 | 358 | int queue_picture(VideoState *is, AVFrame *pFrame) { 359 | 360 | VideoPicture *vp; 361 | AVPicture pict; 362 | 363 | /* wait until we have space for a new pic */ 364 | SDL_LockMutex(is->pictq_mutex); 365 | while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 366 | !is->quit) { 367 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 368 | } 369 | SDL_UnlockMutex(is->pictq_mutex); 370 | 371 | if(is->quit) 372 | return -1; 373 | 374 | // windex is set to 0 initially 375 | vp = &is->pictq[is->pictq_windex]; 376 | 377 | /* allocate or resize the buffer! 
*/ 378 | if(!vp->bmp || 379 | vp->width != is->video_st->codec->width || 380 | vp->height != is->video_st->codec->height) { 381 | SDL_Event event; 382 | 383 | vp->allocated = 0; 384 | /* we have to do it in the main thread */ 385 | event.type = FF_ALLOC_EVENT; 386 | event.user.data1 = is; 387 | SDL_PushEvent(&event); 388 | 389 | /* wait until we have a picture allocated */ 390 | SDL_LockMutex(is->pictq_mutex); 391 | while(!vp->allocated && !is->quit) { 392 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 393 | } 394 | SDL_UnlockMutex(is->pictq_mutex); 395 | if(is->quit) { 396 | return -1; 397 | } 398 | } 399 | /* We have a place to put our picture on the queue */ 400 | 401 | if(vp->bmp) { 402 | 403 | SDL_LockYUVOverlay(vp->bmp); 404 | 405 | /* point pict at the queue */ 406 | 407 | pict.data[0] = vp->bmp->pixels[0]; 408 | pict.data[1] = vp->bmp->pixels[2]; 409 | pict.data[2] = vp->bmp->pixels[1]; 410 | 411 | pict.linesize[0] = vp->bmp->pitches[0]; 412 | pict.linesize[1] = vp->bmp->pitches[2]; 413 | pict.linesize[2] = vp->bmp->pitches[1]; 414 | 415 | // Convert the image into YUV format that SDL uses 416 | sws_scale 417 | ( 418 | is->sws_ctx, 419 | (uint8_t const * const *)pFrame->data, 420 | pFrame->linesize, 421 | 0, 422 | is->video_st->codec->height, 423 | pict.data, 424 | pict.linesize 425 | ); 426 | 427 | SDL_UnlockYUVOverlay(vp->bmp); 428 | /* now we inform our display thread that we have a pic ready */ 429 | if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 430 | is->pictq_windex = 0; 431 | } 432 | SDL_LockMutex(is->pictq_mutex); 433 | is->pictq_size++; 434 | SDL_UnlockMutex(is->pictq_mutex); 435 | } 436 | return 0; 437 | } 438 | 439 | int video_thread(void *arg) { 440 | VideoState *is = (VideoState *)arg; 441 | AVPacket pkt1, *packet = &pkt1; 442 | int frameFinished; 443 | AVFrame *pFrame; 444 | 445 | pFrame = av_frame_alloc(); 446 | 447 | for(;;) { 448 | if(packet_queue_get(&is->videoq, packet, 1) < 0) { 449 | // means we quit getting packets 450 | 
break; 451 | } 452 | // Decode video frame 453 | avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, 454 | packet); 455 | 456 | // Did we get a video frame? 457 | if(frameFinished) { 458 | if(queue_picture(is, pFrame) < 0) { 459 | break; 460 | } 461 | } 462 | av_free_packet(packet); 463 | } 464 | av_free(pFrame); 465 | return 0; 466 | } 467 | 468 | int stream_component_open(VideoState *is, int stream_index) { 469 | 470 | AVFormatContext *pFormatCtx = is->pFormatCtx; 471 | AVCodecContext *codecCtx = NULL; 472 | AVCodec *codec = NULL; 473 | AVDictionary *optionsDict = NULL; 474 | SDL_AudioSpec wanted_spec, spec; 475 | 476 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 477 | return -1; 478 | } 479 | 480 | // Get a pointer to the codec context for the video stream 481 | codecCtx = pFormatCtx->streams[stream_index]->codec; 482 | 483 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 484 | // Set audio settings from codec info 485 | wanted_spec.freq = codecCtx->sample_rate; 486 | wanted_spec.format = AUDIO_S16SYS; 487 | wanted_spec.channels = codecCtx->channels; 488 | wanted_spec.silence = 0; 489 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 490 | wanted_spec.callback = audio_callback; 491 | wanted_spec.userdata = is; 492 | 493 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 494 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 495 | return -1; 496 | } 497 | } 498 | codec = avcodec_find_decoder(codecCtx->codec_id); 499 | if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) { 500 | fprintf(stderr, "Unsupported codec!\n"); 501 | return -1; 502 | } 503 | 504 | switch(codecCtx->codec_type) { 505 | case AVMEDIA_TYPE_AUDIO: 506 | is->audioStream = stream_index; 507 | is->audio_st = pFormatCtx->streams[stream_index]; 508 | is->audio_buf_size = 0; 509 | is->audio_buf_index = 0; 510 | memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); 511 | packet_queue_init(&is->audioq); 512 | SDL_PauseAudio(0); 513 | break; 514 | case 
AVMEDIA_TYPE_VIDEO: 515 | is->videoStream = stream_index; 516 | is->video_st = pFormatCtx->streams[stream_index]; 517 | 518 | packet_queue_init(&is->videoq); 519 | is->video_tid = SDL_CreateThread(video_thread, is); 520 | is->sws_ctx = 521 | sws_getContext 522 | ( 523 | is->video_st->codec->width, 524 | is->video_st->codec->height, 525 | is->video_st->codec->pix_fmt, 526 | is->video_st->codec->width, 527 | is->video_st->codec->height, 528 | PIX_FMT_YUV420P, 529 | SWS_BILINEAR, 530 | NULL, 531 | NULL, 532 | NULL 533 | ); 534 | break; 535 | default: 536 | break; 537 | } 538 | return 0; 539 | } 540 | 541 | int decode_interrupt_cb(void *opaque) { 542 | return (global_video_state && global_video_state->quit); 543 | } 544 | 545 | int decode_thread(void *arg) { 546 | 547 | VideoState *is = (VideoState *)arg; 548 | AVFormatContext *pFormatCtx = NULL; 549 | AVPacket pkt1, *packet = &pkt1; 550 | 551 | int video_index = -1; 552 | int audio_index = -1; 553 | int i; 554 | 555 | AVDictionary *io_dict = NULL; 556 | AVIOInterruptCB callback; 557 | 558 | is->videoStream=-1; 559 | is->audioStream=-1; 560 | 561 | global_video_state = is; 562 | // will interrupt blocking functions if we quit! 
563 | callback.callback = decode_interrupt_cb; 564 | callback.opaque = is; 565 | if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) 566 | { 567 | fprintf(stderr, "Unable to open I/O for %s\n", is->filename); 568 | return -1; 569 | } 570 | 571 | // Open video file 572 | if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0) 573 | return -1; // Couldn't open file 574 | 575 | is->pFormatCtx = pFormatCtx; 576 | 577 | // Retrieve stream information 578 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 579 | return -1; // Couldn't find stream information 580 | 581 | // Dump information about file onto standard error 582 | av_dump_format(pFormatCtx, 0, is->filename, 0); 583 | 584 | // Find the first video stream 585 | 586 | for(i=0; inb_streams; i++) { 587 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 588 | video_index < 0) { 589 | video_index=i; 590 | } 591 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 592 | audio_index < 0) { 593 | audio_index=i; 594 | } 595 | } 596 | if(audio_index >= 0) { 597 | stream_component_open(is, audio_index); 598 | } 599 | if(video_index >= 0) { 600 | stream_component_open(is, video_index); 601 | } 602 | 603 | if(is->videoStream < 0 || is->audioStream < 0) { 604 | fprintf(stderr, "%s: could not open codecs\n", is->filename); 605 | goto fail; 606 | } 607 | 608 | // main decode loop 609 | 610 | for(;;) { 611 | if(is->quit) { 612 | break; 613 | } 614 | // seek stuff goes here 615 | if(is->audioq.size > MAX_AUDIOQ_SIZE || 616 | is->videoq.size > MAX_VIDEOQ_SIZE) { 617 | SDL_Delay(10); 618 | continue; 619 | } 620 | if(av_read_frame(is->pFormatCtx, packet) < 0) { 621 | if(is->pFormatCtx->pb->error == 0) { 622 | SDL_Delay(100); /* no error; wait for user input */ 623 | continue; 624 | } else { 625 | break; 626 | } 627 | } 628 | // Is this a packet from the video stream? 
629 | if(packet->stream_index == is->videoStream) { 630 | packet_queue_put(&is->videoq, packet); 631 | } else if(packet->stream_index == is->audioStream) { 632 | packet_queue_put(&is->audioq, packet); 633 | } else { 634 | av_free_packet(packet); 635 | } 636 | } 637 | /* all done - wait for it */ 638 | while(!is->quit) { 639 | SDL_Delay(100); 640 | } 641 | 642 | fail: 643 | if(1){ 644 | SDL_Event event; 645 | event.type = FF_QUIT_EVENT; 646 | event.user.data1 = is; 647 | SDL_PushEvent(&event); 648 | } 649 | return 0; 650 | } 651 | 652 | int main(int argc, char *argv[]) { 653 | 654 | SDL_Event event; 655 | 656 | VideoState *is; 657 | 658 | is = av_mallocz(sizeof(VideoState)); 659 | 660 | if(argc < 2) { 661 | fprintf(stderr, "Usage: test \n"); 662 | exit(1); 663 | } 664 | // Register all formats and codecs 665 | av_register_all(); 666 | 667 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 668 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 669 | exit(1); 670 | } 671 | 672 | // Make a screen to put our video 673 | #ifndef __DARWIN__ 674 | screen = SDL_SetVideoMode(640, 480, 0, 0); 675 | #else 676 | screen = SDL_SetVideoMode(640, 480, 24, 0); 677 | #endif 678 | if(!screen) { 679 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 680 | exit(1); 681 | } 682 | 683 | av_strlcpy(is->filename, argv[1], 1024); 684 | 685 | is->pictq_mutex = SDL_CreateMutex(); 686 | is->pictq_cond = SDL_CreateCond(); 687 | 688 | schedule_refresh(is, 40); 689 | 690 | is->parse_tid = SDL_CreateThread(decode_thread, is); 691 | if(!is->parse_tid) { 692 | av_free(is); 693 | return -1; 694 | } 695 | for(;;) { 696 | 697 | SDL_WaitEvent(&event); 698 | switch(event.type) { 699 | case FF_QUIT_EVENT: 700 | case SDL_QUIT: 701 | is->quit = 1; 702 | /* 703 | * If the video has finished playing, then both the picture and 704 | * audio queues are waiting for more data. Make them stop 705 | * waiting and terminate normally. 
706 | */ 707 | SDL_CondSignal(is->audioq.cond); 708 | SDL_CondSignal(is->videoq.cond); 709 | SDL_Quit(); 710 | return 0; 711 | break; 712 | case FF_ALLOC_EVENT: 713 | alloc_picture(event.user.data1); 714 | break; 715 | case FF_REFRESH_EVENT: 716 | video_refresh_timer(event.user.data1); 717 | break; 718 | default: 719 | break; 720 | } 721 | } 722 | return 0; 723 | 724 | } 725 | -------------------------------------------------------------------------------- /tutorial05.c: -------------------------------------------------------------------------------- 1 | // tutorial05.c 2 | // A pedagogical video player that really works! 3 | // 4 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 5 | // 6 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 7 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 8 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 9 | // 10 | // Use the Makefile to build all the samples. 11 | // 12 | // Run using 13 | // tutorial05 myvideofile.mpg 14 | // 15 | // to play the video. 
16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | #include 26 | 27 | #ifdef __MINGW32__ 28 | #undef main /* Prevents SDL from overriding main() */ 29 | #endif 30 | 31 | #include 32 | #include 33 | 34 | #define SDL_AUDIO_BUFFER_SIZE 1024 35 | #define MAX_AUDIO_FRAME_SIZE 192000 36 | 37 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 38 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 39 | 40 | #define AV_SYNC_THRESHOLD 0.01 41 | #define AV_NOSYNC_THRESHOLD 10.0 42 | 43 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 44 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 45 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 46 | 47 | #define VIDEO_PICTURE_QUEUE_SIZE 1 48 | 49 | typedef struct PacketQueue { 50 | AVPacketList *first_pkt, *last_pkt; 51 | int nb_packets; 52 | int size; 53 | SDL_mutex *mutex; 54 | SDL_cond *cond; 55 | } PacketQueue; 56 | 57 | 58 | typedef struct VideoPicture { 59 | SDL_Overlay *bmp; 60 | int width, height; /* source height & width */ 61 | int allocated; 62 | double pts; 63 | } VideoPicture; 64 | 65 | typedef struct VideoState { 66 | 67 | AVFormatContext *pFormatCtx; 68 | int videoStream, audioStream; 69 | 70 | double audio_clock; 71 | AVStream *audio_st; 72 | PacketQueue audioq; 73 | AVFrame audio_frame; 74 | uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; 75 | unsigned int audio_buf_size; 76 | unsigned int audio_buf_index; 77 | AVPacket audio_pkt; 78 | uint8_t *audio_pkt_data; 79 | int audio_pkt_size; 80 | int audio_hw_buf_size; 81 | double frame_timer; 82 | double frame_last_pts; 83 | double frame_last_delay; 84 | double video_clock; ///mutex = SDL_CreateMutex(); 112 | q->cond = SDL_CreateCond(); 113 | } 114 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 115 | 116 | AVPacketList *pkt1; 117 | if(av_dup_packet(pkt) < 0) { 118 | return -1; 119 | } 120 | pkt1 = av_malloc(sizeof(AVPacketList)); 121 | if (!pkt1) 122 | return -1; 123 | pkt1->pkt = *pkt; 124 | pkt1->next = NULL; 125 | 126 | 
SDL_LockMutex(q->mutex); 127 | 128 | if (!q->last_pkt) 129 | q->first_pkt = pkt1; 130 | else 131 | q->last_pkt->next = pkt1; 132 | q->last_pkt = pkt1; 133 | q->nb_packets++; 134 | q->size += pkt1->pkt.size; 135 | SDL_CondSignal(q->cond); 136 | 137 | SDL_UnlockMutex(q->mutex); 138 | return 0; 139 | } 140 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 141 | { 142 | AVPacketList *pkt1; 143 | int ret; 144 | 145 | SDL_LockMutex(q->mutex); 146 | 147 | for(;;) { 148 | 149 | if(global_video_state->quit) { 150 | ret = -1; 151 | break; 152 | } 153 | 154 | pkt1 = q->first_pkt; 155 | if (pkt1) { 156 | q->first_pkt = pkt1->next; 157 | if (!q->first_pkt) 158 | q->last_pkt = NULL; 159 | q->nb_packets--; 160 | q->size -= pkt1->pkt.size; 161 | *pkt = pkt1->pkt; 162 | av_free(pkt1); 163 | ret = 1; 164 | break; 165 | } else if (!block) { 166 | ret = 0; 167 | break; 168 | } else { 169 | SDL_CondWait(q->cond, q->mutex); 170 | } 171 | } 172 | SDL_UnlockMutex(q->mutex); 173 | return ret; 174 | } 175 | double get_audio_clock(VideoState *is) { 176 | double pts; 177 | int hw_buf_size, bytes_per_sec, n; 178 | 179 | pts = is->audio_clock; /* maintained in the audio thread */ 180 | hw_buf_size = is->audio_buf_size - is->audio_buf_index; 181 | bytes_per_sec = 0; 182 | n = is->audio_st->codec->channels * 2; 183 | if(is->audio_st) { 184 | bytes_per_sec = is->audio_st->codec->sample_rate * n; 185 | } 186 | if(bytes_per_sec) { 187 | pts -= (double)hw_buf_size / bytes_per_sec; 188 | } 189 | return pts; 190 | } 191 | 192 | int audio_decode_frame(VideoState *is, double *pts_ptr) { 193 | int len1, data_size = 0, n; 194 | AVPacket *pkt = &is->audio_pkt; 195 | double pts; 196 | 197 | for(;;) { 198 | while(is->audio_pkt_size > 0) { 199 | int got_frame; 200 | len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt); 201 | if(len1 < 0) { 202 | /* if error, skip frame */ 203 | is->audio_pkt_size = 0; 204 | break; 205 | } 206 | if (got_frame) 207 | { 208 | 
data_size = 209 | av_samples_get_buffer_size 210 | ( 211 | NULL, 212 | is->audio_st->codec->channels, 213 | is->audio_frame.nb_samples, 214 | is->audio_st->codec->sample_fmt, 215 | 1 216 | ); 217 | memcpy(is->audio_buf, is->audio_frame.data[0], data_size); 218 | } 219 | is->audio_pkt_data += len1; 220 | is->audio_pkt_size -= len1; 221 | if(data_size <= 0) { 222 | /* No data yet, get more frames */ 223 | continue; 224 | } 225 | pts = is->audio_clock; 226 | *pts_ptr = pts; 227 | n = 2 * is->audio_st->codec->channels; 228 | is->audio_clock += (double)data_size / 229 | (double)(n * is->audio_st->codec->sample_rate); 230 | 231 | /* We have data, return it and come back for more later */ 232 | return data_size; 233 | } 234 | if(pkt->data) 235 | av_free_packet(pkt); 236 | 237 | if(is->quit) { 238 | return -1; 239 | } 240 | /* next packet */ 241 | if(packet_queue_get(&is->audioq, pkt, 1) < 0) { 242 | return -1; 243 | } 244 | is->audio_pkt_data = pkt->data; 245 | is->audio_pkt_size = pkt->size; 246 | /* if update, update the audio clock w/pts */ 247 | if(pkt->pts != AV_NOPTS_VALUE) { 248 | is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts; 249 | } 250 | 251 | } 252 | } 253 | 254 | void audio_callback(void *userdata, Uint8 *stream, int len) { 255 | 256 | VideoState *is = (VideoState *)userdata; 257 | int len1, audio_size; 258 | double pts; 259 | 260 | while(len > 0) { 261 | if(is->audio_buf_index >= is->audio_buf_size) { 262 | /* We have already sent all our data; get more */ 263 | audio_size = audio_decode_frame(is, &pts); 264 | if(audio_size < 0) { 265 | /* If error, output silence */ 266 | is->audio_buf_size = 1024; 267 | memset(is->audio_buf, 0, is->audio_buf_size); 268 | } else { 269 | is->audio_buf_size = audio_size; 270 | } 271 | is->audio_buf_index = 0; 272 | } 273 | len1 = is->audio_buf_size - is->audio_buf_index; 274 | if(len1 > len) 275 | len1 = len; 276 | memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); 277 | len -= len1; 278 | 
stream += len1; 279 | is->audio_buf_index += len1; 280 | } 281 | } 282 | 283 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 284 | SDL_Event event; 285 | event.type = FF_REFRESH_EVENT; 286 | event.user.data1 = opaque; 287 | SDL_PushEvent(&event); 288 | return 0; /* 0 means stop timer */ 289 | } 290 | 291 | /* schedule a video refresh in 'delay' ms */ 292 | static void schedule_refresh(VideoState *is, int delay) { 293 | SDL_AddTimer(delay, sdl_refresh_timer_cb, is); 294 | } 295 | 296 | void video_display(VideoState *is) { 297 | 298 | SDL_Rect rect; 299 | VideoPicture *vp; 300 | //AVPicture pict; 301 | float aspect_ratio; 302 | int w, h, x, y; 303 | //int i; 304 | 305 | vp = &is->pictq[is->pictq_rindex]; 306 | if(vp->bmp) { 307 | if(is->video_st->codec->sample_aspect_ratio.num == 0) { 308 | aspect_ratio = 0; 309 | } else { 310 | aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) * 311 | is->video_st->codec->width / is->video_st->codec->height; 312 | } 313 | if(aspect_ratio <= 0.0) { 314 | aspect_ratio = (float)is->video_st->codec->width / 315 | (float)is->video_st->codec->height; 316 | } 317 | h = screen->h; 318 | w = ((int)rint(h * aspect_ratio)) & -3; 319 | if(w > screen->w) { 320 | w = screen->w; 321 | h = ((int)rint(w / aspect_ratio)) & -3; 322 | } 323 | x = (screen->w - w) / 2; 324 | y = (screen->h - h) / 2; 325 | 326 | rect.x = x; 327 | rect.y = y; 328 | rect.w = w; 329 | rect.h = h; 330 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 331 | } 332 | } 333 | 334 | void video_refresh_timer(void *userdata) { 335 | 336 | VideoState *is = (VideoState *)userdata; 337 | VideoPicture *vp; 338 | double actual_delay, delay, sync_threshold, ref_clock, diff; 339 | 340 | if(is->video_st) { 341 | if(is->pictq_size == 0) { 342 | schedule_refresh(is, 1); 343 | } else { 344 | vp = &is->pictq[is->pictq_rindex]; 345 | 346 | delay = vp->pts - is->frame_last_pts; /* the pts from last time */ 347 | if(delay <= 0 || delay >= 1.0) { 348 | /* if incorrect 
delay, use previous one */ 349 | delay = is->frame_last_delay; 350 | } 351 | /* save for next time */ 352 | is->frame_last_delay = delay; 353 | is->frame_last_pts = vp->pts; 354 | 355 | /* update delay to sync to audio */ 356 | ref_clock = get_audio_clock(is); 357 | diff = vp->pts - ref_clock; 358 | 359 | /* Skip or repeat the frame. Take delay into account 360 | FFPlay still doesn't "know if this is the best guess." */ 361 | sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD; 362 | if(fabs(diff) < AV_NOSYNC_THRESHOLD) { 363 | if(diff <= -sync_threshold) { 364 | delay = 0; 365 | } else if(diff >= sync_threshold) { 366 | delay = 2 * delay; 367 | } 368 | } 369 | is->frame_timer += delay; 370 | /* computer the REAL delay */ 371 | actual_delay = is->frame_timer - (av_gettime() / 1000000.0); 372 | if(actual_delay < 0.010) { 373 | /* Really it should skip the picture instead */ 374 | actual_delay = 0.010; 375 | } 376 | schedule_refresh(is, (int)(actual_delay * 1000 + 0.5)); 377 | /* show the picture! */ 378 | video_display(is); 379 | 380 | /* update queue for next picture! 
*/ 381 | if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 382 | is->pictq_rindex = 0; 383 | } 384 | SDL_LockMutex(is->pictq_mutex); 385 | is->pictq_size--; 386 | SDL_CondSignal(is->pictq_cond); 387 | SDL_UnlockMutex(is->pictq_mutex); 388 | } 389 | } else { 390 | schedule_refresh(is, 100); 391 | } 392 | } 393 | 394 | void alloc_picture(void *userdata) { 395 | 396 | VideoState *is = (VideoState *)userdata; 397 | VideoPicture *vp; 398 | 399 | vp = &is->pictq[is->pictq_windex]; 400 | if(vp->bmp) { 401 | // we already have one make another, bigger/smaller 402 | SDL_FreeYUVOverlay(vp->bmp); 403 | } 404 | // Allocate a place to put our YUV image on that screen 405 | vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, 406 | is->video_st->codec->height, 407 | SDL_YV12_OVERLAY, 408 | screen); 409 | vp->width = is->video_st->codec->width; 410 | vp->height = is->video_st->codec->height; 411 | 412 | SDL_LockMutex(is->pictq_mutex); 413 | vp->allocated = 1; 414 | SDL_CondSignal(is->pictq_cond); 415 | SDL_UnlockMutex(is->pictq_mutex); 416 | 417 | } 418 | 419 | int queue_picture(VideoState *is, AVFrame *pFrame, double pts) { 420 | 421 | VideoPicture *vp; 422 | AVPicture pict; 423 | 424 | /* wait until we have space for a new pic */ 425 | SDL_LockMutex(is->pictq_mutex); 426 | while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 427 | !is->quit) { 428 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 429 | } 430 | SDL_UnlockMutex(is->pictq_mutex); 431 | 432 | if(is->quit) 433 | return -1; 434 | 435 | // windex is set to 0 initially 436 | vp = &is->pictq[is->pictq_windex]; 437 | 438 | /* allocate or resize the buffer! 
*/ 439 | if(!vp->bmp || 440 | vp->width != is->video_st->codec->width || 441 | vp->height != is->video_st->codec->height) { 442 | SDL_Event event; 443 | 444 | vp->allocated = 0; 445 | /* we have to do it in the main thread */ 446 | event.type = FF_ALLOC_EVENT; 447 | event.user.data1 = is; 448 | SDL_PushEvent(&event); 449 | 450 | /* wait until we have a picture allocated */ 451 | SDL_LockMutex(is->pictq_mutex); 452 | while(!vp->allocated && !is->quit) { 453 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 454 | } 455 | SDL_UnlockMutex(is->pictq_mutex); 456 | if(is->quit) { 457 | return -1; 458 | } 459 | } 460 | /* We have a place to put our picture on the queue */ 461 | /* If we are skipping a frame, do we set this to null 462 | but still return vp->allocated = 1? */ 463 | 464 | 465 | if(vp->bmp) { 466 | 467 | SDL_LockYUVOverlay(vp->bmp); 468 | 469 | /* point pict at the queue */ 470 | 471 | pict.data[0] = vp->bmp->pixels[0]; 472 | pict.data[1] = vp->bmp->pixels[2]; 473 | pict.data[2] = vp->bmp->pixels[1]; 474 | 475 | pict.linesize[0] = vp->bmp->pitches[0]; 476 | pict.linesize[1] = vp->bmp->pitches[2]; 477 | pict.linesize[2] = vp->bmp->pitches[1]; 478 | 479 | // Convert the image into YUV format that SDL uses 480 | sws_scale 481 | ( 482 | is->sws_ctx, 483 | (uint8_t const * const *)pFrame->data, 484 | pFrame->linesize, 485 | 0, 486 | is->video_st->codec->height, 487 | pict.data, 488 | pict.linesize 489 | ); 490 | 491 | SDL_UnlockYUVOverlay(vp->bmp); 492 | vp->pts = pts; 493 | 494 | /* now we inform our display thread that we have a pic ready */ 495 | if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 496 | is->pictq_windex = 0; 497 | } 498 | SDL_LockMutex(is->pictq_mutex); 499 | is->pictq_size++; 500 | SDL_UnlockMutex(is->pictq_mutex); 501 | } 502 | return 0; 503 | } 504 | 505 | double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) { 506 | 507 | double frame_delay; 508 | 509 | if(pts != 0) { 510 | /* if we have pts, set video clock to it */ 511 
| is->video_clock = pts; 512 | } else { 513 | /* if we aren't given a pts, set it to the clock */ 514 | pts = is->video_clock; 515 | } 516 | /* update the video clock */ 517 | frame_delay = av_q2d(is->video_st->codec->time_base); 518 | /* if we are repeating a frame, adjust clock accordingly */ 519 | frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); 520 | is->video_clock += frame_delay; 521 | return pts; 522 | } 523 | uint64_t global_video_pkt_pts = AV_NOPTS_VALUE; 524 | 525 | /* These are called whenever we allocate a frame 526 | * buffer. We use this to store the global_pts in 527 | * a frame at the time it is allocated. 528 | */ 529 | int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) { 530 | int ret = avcodec_default_get_buffer(c, pic); 531 | uint64_t *pts = av_malloc(sizeof(uint64_t)); 532 | *pts = global_video_pkt_pts; 533 | pic->opaque = pts; 534 | return ret; 535 | } 536 | void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { 537 | if(pic) av_freep(&pic->opaque); 538 | avcodec_default_release_buffer(c, pic); 539 | } 540 | 541 | int video_thread(void *arg) { 542 | VideoState *is = (VideoState *)arg; 543 | AVPacket pkt1, *packet = &pkt1; 544 | int frameFinished; 545 | AVFrame *pFrame; 546 | double pts; 547 | 548 | pFrame = av_frame_alloc(); 549 | 550 | for(;;) { 551 | if(packet_queue_get(&is->videoq, packet, 1) < 0) { 552 | // means we quit getting packets 553 | break; 554 | } 555 | pts = 0; 556 | 557 | // Save global pts to be stored in pFrame in first call 558 | global_video_pkt_pts = packet->pts; 559 | // Decode video frame 560 | avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, 561 | packet); 562 | if(packet->dts == AV_NOPTS_VALUE 563 | && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { 564 | pts = *(uint64_t *)pFrame->opaque; 565 | } else if(packet->dts != AV_NOPTS_VALUE) { 566 | pts = packet->dts; 567 | } else { 568 | pts = 0; 569 | } 570 | pts *= av_q2d(is->video_st->time_base); 571 | 
572 | // Did we get a video frame? 573 | if(frameFinished) { 574 | pts = synchronize_video(is, pFrame, pts); 575 | if(queue_picture(is, pFrame, pts) < 0) { 576 | break; 577 | } 578 | } 579 | av_free_packet(packet); 580 | } 581 | av_free(pFrame); 582 | return 0; 583 | } 584 | 585 | int stream_component_open(VideoState *is, int stream_index) { 586 | 587 | AVFormatContext *pFormatCtx = is->pFormatCtx; 588 | AVCodecContext *codecCtx = NULL; 589 | AVCodec *codec = NULL; 590 | AVDictionary *optionsDict = NULL; 591 | SDL_AudioSpec wanted_spec, spec; 592 | 593 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 594 | return -1; 595 | } 596 | 597 | // Get a pointer to the codec context for the video stream 598 | codecCtx = pFormatCtx->streams[stream_index]->codec; 599 | 600 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 601 | // Set audio settings from codec info 602 | wanted_spec.freq = codecCtx->sample_rate; 603 | wanted_spec.format = AUDIO_S16SYS; 604 | wanted_spec.channels = codecCtx->channels; 605 | wanted_spec.silence = 0; 606 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 607 | wanted_spec.callback = audio_callback; 608 | wanted_spec.userdata = is; 609 | 610 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 611 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 612 | return -1; 613 | } 614 | is->audio_hw_buf_size = spec.size; 615 | } 616 | codec = avcodec_find_decoder(codecCtx->codec_id); 617 | 618 | if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) { 619 | fprintf(stderr, "Unsupported codec!\n"); 620 | return -1; 621 | } 622 | 623 | switch(codecCtx->codec_type) { 624 | case AVMEDIA_TYPE_AUDIO: 625 | is->audioStream = stream_index; 626 | is->audio_st = pFormatCtx->streams[stream_index]; 627 | is->audio_buf_size = 0; 628 | is->audio_buf_index = 0; 629 | memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); 630 | packet_queue_init(&is->audioq); 631 | SDL_PauseAudio(0); 632 | break; 633 | case AVMEDIA_TYPE_VIDEO: 634 | is->videoStream = 
stream_index; 635 | is->video_st = pFormatCtx->streams[stream_index]; 636 | 637 | is->frame_timer = (double)av_gettime() / 1000000.0; 638 | is->frame_last_delay = 40e-3; 639 | 640 | packet_queue_init(&is->videoq); 641 | is->video_tid = SDL_CreateThread(video_thread, is); 642 | is->sws_ctx = 643 | sws_getContext 644 | ( 645 | is->video_st->codec->width, 646 | is->video_st->codec->height, 647 | is->video_st->codec->pix_fmt, 648 | is->video_st->codec->width, 649 | is->video_st->codec->height, 650 | PIX_FMT_YUV420P, 651 | SWS_BILINEAR, 652 | NULL, 653 | NULL, 654 | NULL 655 | ); 656 | codecCtx->get_buffer2 = our_get_buffer; 657 | codecCtx->release_buffer = our_release_buffer; 658 | break; 659 | default: 660 | break; 661 | } 662 | 663 | return 0; 664 | } 665 | 666 | int decode_interrupt_cb(void *opaque) { 667 | return (global_video_state && global_video_state->quit); 668 | } 669 | 670 | int decode_thread(void *arg) { 671 | 672 | VideoState *is = (VideoState *)arg; 673 | AVFormatContext *pFormatCtx = NULL; 674 | AVPacket pkt1, *packet = &pkt1; 675 | 676 | AVDictionary *io_dict = NULL; 677 | AVIOInterruptCB callback; 678 | 679 | int video_index = -1; 680 | int audio_index = -1; 681 | int i; 682 | 683 | is->videoStream=-1; 684 | is->audioStream=-1; 685 | 686 | global_video_state = is; 687 | // will interrupt blocking functions if we quit! 
688 | callback.callback = decode_interrupt_cb; 689 | callback.opaque = is; 690 | if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) 691 | { 692 | fprintf(stderr, "Unable to open I/O for %s\n", is->filename); 693 | return -1; 694 | } 695 | 696 | // Open video file 697 | if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0) 698 | return -1; // Couldn't open file 699 | 700 | is->pFormatCtx = pFormatCtx; 701 | 702 | // Retrieve stream information 703 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 704 | return -1; // Couldn't find stream information 705 | 706 | // Dump information about file onto standard error 707 | av_dump_format(pFormatCtx, 0, is->filename, 0); 708 | 709 | // Find the first video stream 710 | 711 | for(i=0; inb_streams; i++) { 712 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 713 | video_index < 0) { 714 | video_index=i; 715 | } 716 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 717 | audio_index < 0) { 718 | audio_index=i; 719 | } 720 | } 721 | if(audio_index >= 0) { 722 | stream_component_open(is, audio_index); 723 | } 724 | if(video_index >= 0) { 725 | stream_component_open(is, video_index); 726 | } 727 | 728 | if(is->videoStream < 0 || is->audioStream < 0) { 729 | fprintf(stderr, "%s: could not open codecs\n", is->filename); 730 | goto fail; 731 | } 732 | 733 | // main decode loop 734 | 735 | for(;;) { 736 | if(is->quit) { 737 | break; 738 | } 739 | // seek stuff goes here 740 | if(is->audioq.size > MAX_AUDIOQ_SIZE || 741 | is->videoq.size > MAX_VIDEOQ_SIZE) { 742 | SDL_Delay(10); 743 | continue; 744 | } 745 | if(av_read_frame(is->pFormatCtx, packet) < 0) { 746 | if(is->pFormatCtx->pb->error == 0) { 747 | SDL_Delay(100); /* no error; wait for user input */ 748 | continue; 749 | } else { 750 | break; 751 | } 752 | } 753 | // Is this a packet from the video stream? 
754 | if(packet->stream_index == is->videoStream) { 755 | packet_queue_put(&is->videoq, packet); 756 | } else if(packet->stream_index == is->audioStream) { 757 | packet_queue_put(&is->audioq, packet); 758 | } else { 759 | av_free_packet(packet); 760 | } 761 | } 762 | /* all done - wait for it */ 763 | while(!is->quit) { 764 | SDL_Delay(100); 765 | } 766 | 767 | fail: 768 | { 769 | SDL_Event event; 770 | event.type = FF_QUIT_EVENT; 771 | event.user.data1 = is; 772 | SDL_PushEvent(&event); 773 | } 774 | return 0; 775 | } 776 | 777 | int main(int argc, char *argv[]) { 778 | 779 | SDL_Event event; 780 | 781 | VideoState *is; 782 | 783 | is = av_mallocz(sizeof(VideoState)); 784 | 785 | if(argc < 2) { 786 | fprintf(stderr, "Usage: test \n"); 787 | exit(1); 788 | } 789 | // Register all formats and codecs 790 | av_register_all(); 791 | 792 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 793 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 794 | exit(1); 795 | } 796 | 797 | // Make a screen to put our video 798 | #ifndef __DARWIN__ 799 | screen = SDL_SetVideoMode(640, 480, 0, 0); 800 | #else 801 | screen = SDL_SetVideoMode(640, 480, 24, 0); 802 | #endif 803 | if(!screen) { 804 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 805 | exit(1); 806 | } 807 | 808 | av_strlcpy(is->filename, argv[1], 1024); 809 | 810 | is->pictq_mutex = SDL_CreateMutex(); 811 | is->pictq_cond = SDL_CreateCond(); 812 | 813 | schedule_refresh(is, 40); 814 | 815 | is->parse_tid = SDL_CreateThread(decode_thread, is); 816 | if(!is->parse_tid) { 817 | av_free(is); 818 | return -1; 819 | } 820 | for(;;) { 821 | 822 | SDL_WaitEvent(&event); 823 | switch(event.type) { 824 | case FF_QUIT_EVENT: 825 | case SDL_QUIT: 826 | is->quit = 1; 827 | /* 828 | * If the video has finished playing, then both the picture and 829 | * audio queues are waiting for more data. Make them stop 830 | * waiting and terminate normally. 
831 | */ 832 | SDL_CondSignal(is->audioq.cond); 833 | SDL_CondSignal(is->videoq.cond); 834 | SDL_Quit(); 835 | exit(0); 836 | break; 837 | case FF_ALLOC_EVENT: 838 | alloc_picture(event.user.data1); 839 | break; 840 | case FF_REFRESH_EVENT: 841 | video_refresh_timer(event.user.data1); 842 | break; 843 | default: 844 | break; 845 | } 846 | } 847 | return 0; 848 | 849 | } 850 | -------------------------------------------------------------------------------- /tutorial06.c: -------------------------------------------------------------------------------- 1 | // tutorial06.c 2 | // A pedagogical video player that really works! 3 | // 4 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 5 | // 6 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 7 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 8 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 9 | // 10 | // Use the Makefile to build all the samples. 11 | // 12 | // Run using 13 | // tutorial06 myvideofile.mpg 14 | // 15 | // to play the video. 
16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | #include 26 | 27 | #ifdef __MINGW32__ 28 | #undef main /* Prevents SDL from overriding main() */ 29 | #endif 30 | 31 | #include 32 | #include 33 | 34 | #define SDL_AUDIO_BUFFER_SIZE 1024 35 | #define MAX_AUDIO_FRAME_SIZE 192000 36 | 37 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 38 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 39 | 40 | #define AV_SYNC_THRESHOLD 0.01 41 | #define AV_NOSYNC_THRESHOLD 10.0 42 | 43 | #define SAMPLE_CORRECTION_PERCENT_MAX 10 44 | #define AUDIO_DIFF_AVG_NB 20 45 | 46 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 47 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 48 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 49 | 50 | #define VIDEO_PICTURE_QUEUE_SIZE 1 51 | 52 | #define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER 53 | 54 | typedef struct PacketQueue { 55 | AVPacketList *first_pkt, *last_pkt; 56 | int nb_packets; 57 | int size; 58 | SDL_mutex *mutex; 59 | SDL_cond *cond; 60 | } PacketQueue; 61 | 62 | 63 | typedef struct VideoPicture { 64 | SDL_Overlay *bmp; 65 | int width, height; /* source height & width */ 66 | int allocated; 67 | double pts; 68 | } VideoPicture; 69 | 70 | typedef struct VideoState { 71 | 72 | AVFormatContext *pFormatCtx; 73 | int videoStream, audioStream; 74 | 75 | int av_sync_type; 76 | double external_clock; /* external clock base */ 77 | int64_t external_clock_time; 78 | 79 | double audio_clock; 80 | AVStream *audio_st; 81 | PacketQueue audioq; 82 | AVFrame audio_frame; 83 | uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; 84 | unsigned int audio_buf_size; 85 | unsigned int audio_buf_index; 86 | AVPacket audio_pkt; 87 | uint8_t *audio_pkt_data; 88 | int audio_pkt_size; 89 | int audio_hw_buf_size; 90 | double audio_diff_cum; /* used for AV difference average computation */ 91 | double audio_diff_avg_coef; 92 | double audio_diff_threshold; 93 | int audio_diff_avg_count; 94 | double frame_timer; 95 | double 
frame_last_pts; 96 | double frame_last_delay; 97 | double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame 98 | double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used) 99 | int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts 100 | AVStream *video_st; 101 | PacketQueue videoq; 102 | 103 | VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; 104 | int pictq_size, pictq_rindex, pictq_windex; 105 | SDL_mutex *pictq_mutex; 106 | SDL_cond *pictq_cond; 107 | 108 | SDL_Thread *parse_tid; 109 | SDL_Thread *video_tid; 110 | 111 | char filename[1024]; 112 | int quit; 113 | 114 | AVIOContext *io_context; 115 | struct SwsContext *sws_ctx; 116 | } VideoState; 117 | 118 | enum { 119 | AV_SYNC_AUDIO_MASTER, 120 | AV_SYNC_VIDEO_MASTER, 121 | AV_SYNC_EXTERNAL_MASTER, 122 | }; 123 | 124 | SDL_Surface *screen; 125 | 126 | /* Since we only have one decoding thread, the Big Struct 127 | can be global in case we need it. */ 128 | VideoState *global_video_state; 129 | 130 | void packet_queue_init(PacketQueue *q) { 131 | memset(q, 0, sizeof(PacketQueue)); 132 | q->mutex = SDL_CreateMutex(); 133 | q->cond = SDL_CreateCond(); 134 | } 135 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 136 | 137 | AVPacketList *pkt1; 138 | if(av_dup_packet(pkt) < 0) { 139 | return -1; 140 | } 141 | pkt1 = av_malloc(sizeof(AVPacketList)); 142 | if (!pkt1) 143 | return -1; 144 | pkt1->pkt = *pkt; 145 | pkt1->next = NULL; 146 | 147 | SDL_LockMutex(q->mutex); 148 | 149 | if (!q->last_pkt) 150 | q->first_pkt = pkt1; 151 | else 152 | q->last_pkt->next = pkt1; 153 | q->last_pkt = pkt1; 154 | q->nb_packets++; 155 | q->size += pkt1->pkt.size; 156 | SDL_CondSignal(q->cond); 157 | 158 | SDL_UnlockMutex(q->mutex); 159 | return 0; 160 | } 161 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 162 | { 163 | AVPacketList *pkt1; 164 | int ret; 165 | 166 | SDL_LockMutex(q->mutex); 167 | 168 | for(;;) { 169 | 170 | if(global_video_state->quit) { 171 | ret = -1; 172 | break; 173 | } 174 | 175 | pkt1 = q->first_pkt; 176 | if (pkt1) { 177 | q->first_pkt = pkt1->next; 178 | if (!q->first_pkt) 179 | q->last_pkt = NULL; 180 | q->nb_packets--; 181 | q->size -= pkt1->pkt.size; 182 | *pkt = pkt1->pkt; 183 | av_free(pkt1); 184 | ret = 1; 185 | break; 186 | } else if (!block) { 187 | ret = 0; 188 | break; 189 | } else { 190 | SDL_CondWait(q->cond, q->mutex); 191 | } 192 | } 193 | SDL_UnlockMutex(q->mutex); 194 | return ret; 195 | } 196 | double get_audio_clock(VideoState *is) { 197 | double pts; 198 | int hw_buf_size, bytes_per_sec, n; 199 | 200 | pts = is->audio_clock; /* maintained in the audio thread */ 201 | hw_buf_size = is->audio_buf_size - is->audio_buf_index; 202 | bytes_per_sec = 0; 203 | n = is->audio_st->codec->channels * 2; 204 | if(is->audio_st) { 205 | bytes_per_sec = is->audio_st->codec->sample_rate * n; 206 | } 207 | if(bytes_per_sec) { 208 | pts -= (double)hw_buf_size / bytes_per_sec; 209 | } 210 | return pts; 211 | } 212 | double 
get_video_clock(VideoState *is) { 213 | double delta; 214 | 215 | delta = (av_gettime() - is->video_current_pts_time) / 1000000.0; 216 | return is->video_current_pts + delta; 217 | } 218 | double get_external_clock(VideoState *is) { 219 | return av_gettime() / 1000000.0; 220 | } 221 | 222 | double get_master_clock(VideoState *is) { 223 | if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) { 224 | return get_video_clock(is); 225 | } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) { 226 | return get_audio_clock(is); 227 | } else { 228 | return get_external_clock(is); 229 | } 230 | } 231 | /* Add or subtract samples to get a better sync, return new 232 | audio buffer size */ 233 | int synchronize_audio(VideoState *is, short *samples, 234 | int samples_size, double pts) { 235 | int n; 236 | double ref_clock; 237 | 238 | n = 2 * is->audio_st->codec->channels; 239 | 240 | if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) { 241 | double diff, avg_diff; 242 | int wanted_size, min_size, max_size /*, nb_samples */; 243 | 244 | ref_clock = get_master_clock(is); 245 | diff = get_audio_clock(is) - ref_clock; 246 | 247 | if(diff < AV_NOSYNC_THRESHOLD) { 248 | // accumulate the diffs 249 | is->audio_diff_cum = diff + is->audio_diff_avg_coef 250 | * is->audio_diff_cum; 251 | if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { 252 | is->audio_diff_avg_count++; 253 | } else { 254 | avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef); 255 | if(fabs(avg_diff) >= is->audio_diff_threshold) { 256 | wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n); 257 | min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100); 258 | max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100); 259 | if(wanted_size < min_size) { 260 | wanted_size = min_size; 261 | } else if (wanted_size > max_size) { 262 | wanted_size = max_size; 263 | } 264 | if(wanted_size < samples_size) { 265 | /* remove samples */ 266 | samples_size = wanted_size; 267 | } 
else if(wanted_size > samples_size) { 268 | uint8_t *samples_end, *q; 269 | int nb; 270 | 271 | /* add samples by copying final sample*/ 272 | nb = (samples_size - wanted_size); 273 | samples_end = (uint8_t *)samples + samples_size - n; 274 | q = samples_end + n; 275 | while(nb > 0) { 276 | memcpy(q, samples_end, n); 277 | q += n; 278 | nb -= n; 279 | } 280 | samples_size = wanted_size; 281 | } 282 | } 283 | } 284 | } else { 285 | /* difference is TOO big; reset diff stuff */ 286 | is->audio_diff_avg_count = 0; 287 | is->audio_diff_cum = 0; 288 | } 289 | } 290 | return samples_size; 291 | } 292 | 293 | int audio_decode_frame(VideoState *is, double *pts_ptr) { 294 | 295 | int len1, data_size = 0, n; 296 | AVPacket *pkt = &is->audio_pkt; 297 | double pts; 298 | 299 | for(;;) { 300 | while(is->audio_pkt_size > 0) { 301 | int got_frame = 0; 302 | len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt); 303 | if(len1 < 0) { 304 | /* if error, skip frame */ 305 | is->audio_pkt_size = 0; 306 | break; 307 | } 308 | if (got_frame) 309 | { 310 | data_size = 311 | av_samples_get_buffer_size 312 | ( 313 | NULL, 314 | is->audio_st->codec->channels, 315 | is->audio_frame.nb_samples, 316 | is->audio_st->codec->sample_fmt, 317 | 1 318 | ); 319 | memcpy(is->audio_buf, is->audio_frame.data[0], data_size); 320 | } 321 | is->audio_pkt_data += len1; 322 | is->audio_pkt_size -= len1; 323 | if(data_size <= 0) { 324 | /* No data yet, get more frames */ 325 | continue; 326 | } 327 | pts = is->audio_clock; 328 | *pts_ptr = pts; 329 | n = 2 * is->audio_st->codec->channels; 330 | is->audio_clock += (double)data_size / 331 | (double)(n * is->audio_st->codec->sample_rate); 332 | 333 | /* We have data, return it and come back for more later */ 334 | return data_size; 335 | } 336 | if(pkt->data) 337 | av_free_packet(pkt); 338 | 339 | if(is->quit) { 340 | return -1; 341 | } 342 | /* next packet */ 343 | if(packet_queue_get(&is->audioq, pkt, 1) < 0) { 344 | return -1; 
345 | } 346 | is->audio_pkt_data = pkt->data; 347 | is->audio_pkt_size = pkt->size; 348 | /* if update, update the audio clock w/pts */ 349 | if(pkt->pts != AV_NOPTS_VALUE) { 350 | is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts; 351 | } 352 | } 353 | } 354 | 355 | void audio_callback(void *userdata, Uint8 *stream, int len) { 356 | 357 | VideoState *is = (VideoState *)userdata; 358 | int len1, audio_size; 359 | double pts; 360 | 361 | while(len > 0) { 362 | if(is->audio_buf_index >= is->audio_buf_size) { 363 | /* We have already sent all our data; get more */ 364 | audio_size = audio_decode_frame(is, &pts); 365 | if(audio_size < 0) { 366 | /* If error, output silence */ 367 | is->audio_buf_size = 1024; 368 | memset(is->audio_buf, 0, is->audio_buf_size); 369 | } else { 370 | audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, 371 | audio_size, pts); 372 | is->audio_buf_size = audio_size; 373 | } 374 | is->audio_buf_index = 0; 375 | } 376 | len1 = is->audio_buf_size - is->audio_buf_index; 377 | if(len1 > len) 378 | len1 = len; 379 | memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); 380 | len -= len1; 381 | stream += len1; 382 | is->audio_buf_index += len1; 383 | } 384 | } 385 | 386 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 387 | SDL_Event event; 388 | event.type = FF_REFRESH_EVENT; 389 | event.user.data1 = opaque; 390 | SDL_PushEvent(&event); 391 | return 0; /* 0 means stop timer */ 392 | } 393 | 394 | /* schedule a video refresh in 'delay' ms */ 395 | static void schedule_refresh(VideoState *is, int delay) { 396 | SDL_AddTimer(delay, sdl_refresh_timer_cb, is); 397 | } 398 | 399 | void video_display(VideoState *is) { 400 | 401 | SDL_Rect rect; 402 | VideoPicture *vp; 403 | //AVPicture pict; 404 | float aspect_ratio; 405 | int w, h, x, y; 406 | //int i; 407 | 408 | vp = &is->pictq[is->pictq_rindex]; 409 | if(vp->bmp) { 410 | if(is->video_st->codec->sample_aspect_ratio.num == 0) { 411 | aspect_ratio = 
0; 412 | } else { 413 | aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) * 414 | is->video_st->codec->width / is->video_st->codec->height; 415 | } 416 | if(aspect_ratio <= 0.0) { 417 | aspect_ratio = (float)is->video_st->codec->width / 418 | (float)is->video_st->codec->height; 419 | } 420 | h = screen->h; 421 | w = ((int)rint(h * aspect_ratio)) & -3; 422 | if(w > screen->w) { 423 | w = screen->w; 424 | h = ((int)rint(w / aspect_ratio)) & -3; 425 | } 426 | x = (screen->w - w) / 2; 427 | y = (screen->h - h) / 2; 428 | 429 | rect.x = x; 430 | rect.y = y; 431 | rect.w = w; 432 | rect.h = h; 433 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 434 | } 435 | } 436 | 437 | void video_refresh_timer(void *userdata) { 438 | 439 | VideoState *is = (VideoState *)userdata; 440 | VideoPicture *vp; 441 | double actual_delay, delay, sync_threshold, ref_clock, diff; 442 | 443 | if(is->video_st) { 444 | if(is->pictq_size == 0) { 445 | schedule_refresh(is, 1); 446 | } else { 447 | vp = &is->pictq[is->pictq_rindex]; 448 | 449 | is->video_current_pts = vp->pts; 450 | is->video_current_pts_time = av_gettime(); 451 | 452 | delay = vp->pts - is->frame_last_pts; /* the pts from last time */ 453 | if(delay <= 0 || delay >= 1.0) { 454 | /* if incorrect delay, use previous one */ 455 | delay = is->frame_last_delay; 456 | } 457 | /* save for next time */ 458 | is->frame_last_delay = delay; 459 | is->frame_last_pts = vp->pts; 460 | 461 | /* update delay to sync to audio if not master source */ 462 | if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) { 463 | ref_clock = get_master_clock(is); 464 | diff = vp->pts - ref_clock; 465 | 466 | /* Skip or repeat the frame. Take delay into account 467 | FFPlay still doesn't "know if this is the best guess." */ 468 | sync_threshold = (delay > AV_SYNC_THRESHOLD) ? 
delay : AV_SYNC_THRESHOLD; 469 | if(fabs(diff) < AV_NOSYNC_THRESHOLD) { 470 | if(diff <= -sync_threshold) { 471 | delay = 0; 472 | } else if(diff >= sync_threshold) { 473 | delay = 2 * delay; 474 | } 475 | } 476 | } 477 | 478 | is->frame_timer += delay; 479 | /* computer the REAL delay */ 480 | actual_delay = is->frame_timer - (av_gettime() / 1000000.0); 481 | if(actual_delay < 0.010) { 482 | /* Really it should skip the picture instead */ 483 | actual_delay = 0.010; 484 | } 485 | schedule_refresh(is, (int)(actual_delay * 1000 + 0.5)); 486 | 487 | /* show the picture! */ 488 | video_display(is); 489 | 490 | /* update queue for next picture! */ 491 | if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 492 | is->pictq_rindex = 0; 493 | } 494 | SDL_LockMutex(is->pictq_mutex); 495 | is->pictq_size--; 496 | SDL_CondSignal(is->pictq_cond); 497 | SDL_UnlockMutex(is->pictq_mutex); 498 | } 499 | } else { 500 | schedule_refresh(is, 100); 501 | } 502 | } 503 | 504 | void alloc_picture(void *userdata) { 505 | 506 | VideoState *is = (VideoState *)userdata; 507 | VideoPicture *vp; 508 | 509 | vp = &is->pictq[is->pictq_windex]; 510 | if(vp->bmp) { 511 | // we already have one make another, bigger/smaller 512 | SDL_FreeYUVOverlay(vp->bmp); 513 | } 514 | // Allocate a place to put our YUV image on that screen 515 | vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, 516 | is->video_st->codec->height, 517 | SDL_YV12_OVERLAY, 518 | screen); 519 | vp->width = is->video_st->codec->width; 520 | vp->height = is->video_st->codec->height; 521 | 522 | SDL_LockMutex(is->pictq_mutex); 523 | vp->allocated = 1; 524 | SDL_CondSignal(is->pictq_cond); 525 | SDL_UnlockMutex(is->pictq_mutex); 526 | 527 | } 528 | 529 | int queue_picture(VideoState *is, AVFrame *pFrame, double pts) { 530 | 531 | VideoPicture *vp; 532 | AVPicture pict; 533 | 534 | /* wait until we have space for a new pic */ 535 | SDL_LockMutex(is->pictq_mutex); 536 | while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 537 | 
!is->quit) { 538 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 539 | } 540 | SDL_UnlockMutex(is->pictq_mutex); 541 | 542 | if(is->quit) 543 | return -1; 544 | 545 | // windex is set to 0 initially 546 | vp = &is->pictq[is->pictq_windex]; 547 | 548 | /* allocate or resize the buffer! */ 549 | if(!vp->bmp || 550 | vp->width != is->video_st->codec->width || 551 | vp->height != is->video_st->codec->height) { 552 | SDL_Event event; 553 | 554 | vp->allocated = 0; 555 | /* we have to do it in the main thread */ 556 | event.type = FF_ALLOC_EVENT; 557 | event.user.data1 = is; 558 | SDL_PushEvent(&event); 559 | 560 | /* wait until we have a picture allocated */ 561 | SDL_LockMutex(is->pictq_mutex); 562 | while(!vp->allocated && !is->quit) { 563 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 564 | } 565 | SDL_UnlockMutex(is->pictq_mutex); 566 | if(is->quit) { 567 | return -1; 568 | } 569 | } 570 | /* We have a place to put our picture on the queue */ 571 | /* If we are skipping a frame, do we set this to null 572 | but still return vp->allocated = 1? 
*/ 573 | 574 | 575 | if(vp->bmp) { 576 | 577 | SDL_LockYUVOverlay(vp->bmp); 578 | 579 | /* point pict at the queue */ 580 | 581 | pict.data[0] = vp->bmp->pixels[0]; 582 | pict.data[1] = vp->bmp->pixels[2]; 583 | pict.data[2] = vp->bmp->pixels[1]; 584 | 585 | pict.linesize[0] = vp->bmp->pitches[0]; 586 | pict.linesize[1] = vp->bmp->pitches[2]; 587 | pict.linesize[2] = vp->bmp->pitches[1]; 588 | 589 | // Convert the image into YUV format that SDL uses 590 | sws_scale 591 | ( 592 | is->sws_ctx, 593 | (uint8_t const * const *)pFrame->data, 594 | pFrame->linesize, 595 | 0, 596 | is->video_st->codec->height, 597 | pict.data, 598 | pict.linesize 599 | ); 600 | 601 | SDL_UnlockYUVOverlay(vp->bmp); 602 | vp->pts = pts; 603 | 604 | /* now we inform our display thread that we have a pic ready */ 605 | if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 606 | is->pictq_windex = 0; 607 | } 608 | SDL_LockMutex(is->pictq_mutex); 609 | is->pictq_size++; 610 | SDL_UnlockMutex(is->pictq_mutex); 611 | } 612 | return 0; 613 | } 614 | 615 | double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) { 616 | 617 | double frame_delay; 618 | 619 | if(pts != 0) { 620 | /* if we have pts, set video clock to it */ 621 | is->video_clock = pts; 622 | } else { 623 | /* if we aren't given a pts, set it to the clock */ 624 | pts = is->video_clock; 625 | } 626 | /* update the video clock */ 627 | frame_delay = av_q2d(is->video_st->codec->time_base); 628 | /* if we are repeating a frame, adjust clock accordingly */ 629 | frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); 630 | is->video_clock += frame_delay; 631 | return pts; 632 | } 633 | 634 | uint64_t global_video_pkt_pts = AV_NOPTS_VALUE; 635 | 636 | /* These are called whenever we allocate a frame 637 | * buffer. We use this to store the global_pts in 638 | * a frame at the time it is allocated. 
639 | */ 640 | int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) { 641 | int ret = avcodec_default_get_buffer(c, pic); 642 | uint64_t *pts = av_malloc(sizeof(uint64_t)); 643 | *pts = global_video_pkt_pts; 644 | pic->opaque = pts; 645 | return ret; 646 | } 647 | void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { 648 | if(pic) av_freep(&pic->opaque); 649 | avcodec_default_release_buffer(c, pic); 650 | } 651 | 652 | int video_thread(void *arg) { 653 | VideoState *is = (VideoState *)arg; 654 | AVPacket pkt1, *packet = &pkt1; 655 | int frameFinished; 656 | AVFrame *pFrame; 657 | double pts; 658 | 659 | pFrame = av_frame_alloc(); 660 | 661 | for(;;) { 662 | if(packet_queue_get(&is->videoq, packet, 1) < 0) { 663 | // means we quit getting packets 664 | break; 665 | } 666 | pts = 0; 667 | 668 | // Save global pts to be stored in pFrame in first call 669 | global_video_pkt_pts = packet->pts; 670 | // Decode video frame 671 | avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, 672 | packet); 673 | if(packet->dts == AV_NOPTS_VALUE 674 | && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { 675 | pts = *(uint64_t *)pFrame->opaque; 676 | } else if(packet->dts != AV_NOPTS_VALUE) { 677 | pts = packet->dts; 678 | } else { 679 | pts = 0; 680 | } 681 | pts *= av_q2d(is->video_st->time_base); 682 | 683 | // Did we get a video frame? 
684 | if(frameFinished) { 685 | pts = synchronize_video(is, pFrame, pts); 686 | if(queue_picture(is, pFrame, pts) < 0) { 687 | break; 688 | } 689 | } 690 | av_free_packet(packet); 691 | } 692 | av_free(pFrame); 693 | return 0; 694 | } 695 | 696 | int stream_component_open(VideoState *is, int stream_index) { 697 | 698 | AVFormatContext *pFormatCtx = is->pFormatCtx; 699 | AVCodecContext *codecCtx = NULL; 700 | AVCodec *codec = NULL; 701 | AVDictionary *optionsDict = NULL; 702 | SDL_AudioSpec wanted_spec, spec; 703 | 704 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 705 | return -1; 706 | } 707 | 708 | // Get a pointer to the codec context for the video stream 709 | codecCtx = pFormatCtx->streams[stream_index]->codec; 710 | 711 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 712 | // Set audio settings from codec info 713 | wanted_spec.freq = codecCtx->sample_rate; 714 | wanted_spec.format = AUDIO_S16SYS; 715 | wanted_spec.channels = codecCtx->channels; 716 | wanted_spec.silence = 0; 717 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 718 | wanted_spec.callback = audio_callback; 719 | wanted_spec.userdata = is; 720 | 721 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 722 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 723 | return -1; 724 | } 725 | is->audio_hw_buf_size = spec.size; 726 | } 727 | codec = avcodec_find_decoder(codecCtx->codec_id); 728 | if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) { 729 | fprintf(stderr, "Unsupported codec!\n"); 730 | return -1; 731 | } 732 | 733 | switch(codecCtx->codec_type) { 734 | case AVMEDIA_TYPE_AUDIO: 735 | is->audioStream = stream_index; 736 | is->audio_st = pFormatCtx->streams[stream_index]; 737 | is->audio_buf_size = 0; 738 | is->audio_buf_index = 0; 739 | 740 | /* averaging filter for audio sync */ 741 | is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB)); 742 | is->audio_diff_avg_count = 0; 743 | /* Correct audio only if larger error than this */ 744 | 
is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate; 745 | 746 | memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); 747 | packet_queue_init(&is->audioq); 748 | SDL_PauseAudio(0); 749 | break; 750 | case AVMEDIA_TYPE_VIDEO: 751 | is->videoStream = stream_index; 752 | is->video_st = pFormatCtx->streams[stream_index]; 753 | 754 | is->frame_timer = (double)av_gettime() / 1000000.0; 755 | is->frame_last_delay = 40e-3; 756 | is->video_current_pts_time = av_gettime(); 757 | 758 | packet_queue_init(&is->videoq); 759 | is->video_tid = SDL_CreateThread(video_thread, is); 760 | is->sws_ctx = 761 | sws_getContext 762 | ( 763 | is->video_st->codec->width, 764 | is->video_st->codec->height, 765 | is->video_st->codec->pix_fmt, 766 | is->video_st->codec->width, 767 | is->video_st->codec->height, 768 | PIX_FMT_YUV420P, 769 | SWS_BILINEAR, 770 | NULL, 771 | NULL, 772 | NULL 773 | ); 774 | codecCtx->get_buffer2 = our_get_buffer; 775 | codecCtx->release_buffer = our_release_buffer; 776 | break; 777 | default: 778 | break; 779 | } 780 | 781 | return 0; 782 | } 783 | 784 | int decode_interrupt_cb(void *opaque) { 785 | return (global_video_state && global_video_state->quit); 786 | } 787 | 788 | int decode_thread(void *arg) { 789 | 790 | VideoState *is = (VideoState *)arg; 791 | AVFormatContext *pFormatCtx = NULL; 792 | AVPacket pkt1, *packet = &pkt1; 793 | 794 | AVDictionary *io_dict = NULL; 795 | AVIOInterruptCB callback; 796 | 797 | int video_index = -1; 798 | int audio_index = -1; 799 | int i; 800 | 801 | is->videoStream=-1; 802 | is->audioStream=-1; 803 | 804 | global_video_state = is; 805 | // will interrupt blocking functions if we quit! 
806 | callback.callback = decode_interrupt_cb; 807 | callback.opaque = is; 808 | if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) 809 | { 810 | fprintf(stderr, "Unable to open I/O for %s\n", is->filename); 811 | return -1; 812 | } 813 | 814 | // Open video file 815 | if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0) 816 | return -1; // Couldn't open file 817 | 818 | is->pFormatCtx = pFormatCtx; 819 | 820 | // Retrieve stream information 821 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 822 | return -1; // Couldn't find stream information 823 | 824 | // Dump information about file onto standard error 825 | av_dump_format(pFormatCtx, 0, is->filename, 0); 826 | 827 | // Find the first video stream 828 | 829 | for(i=0; i<pFormatCtx->nb_streams; i++) { 830 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 831 | video_index < 0) { 832 | video_index=i; 833 | } 834 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 835 | audio_index < 0) { 836 | audio_index=i; 837 | } 838 | } 839 | if(audio_index >= 0) { 840 | stream_component_open(is, audio_index); 841 | } 842 | if(video_index >= 0) { 843 | stream_component_open(is, video_index); 844 | } 845 | 846 | if(is->videoStream < 0 || is->audioStream < 0) { 847 | fprintf(stderr, "%s: could not open codecs\n", is->filename); 848 | goto fail; 849 | } 850 | 851 | // main decode loop 852 | 853 | for(;;) { 854 | if(is->quit) { 855 | break; 856 | } 857 | // seek stuff goes here 858 | if(is->audioq.size > MAX_AUDIOQ_SIZE || 859 | is->videoq.size > MAX_VIDEOQ_SIZE) { 860 | SDL_Delay(10); 861 | continue; 862 | } 863 | if(av_read_frame(is->pFormatCtx, packet) < 0) { 864 | if(is->pFormatCtx->pb->error == 0) { 865 | SDL_Delay(100); /* no error; wait for user input */ 866 | continue; 867 | } else { 868 | break; 869 | } 870 | } 871 | // Is this a packet from the video stream? 
872 | if(packet->stream_index == is->videoStream) { 873 | packet_queue_put(&is->videoq, packet); 874 | } else if(packet->stream_index == is->audioStream) { 875 | packet_queue_put(&is->audioq, packet); 876 | } else { 877 | av_free_packet(packet); 878 | } 879 | } 880 | /* all done - wait for it */ 881 | while(!is->quit) { 882 | SDL_Delay(100); 883 | } 884 | 885 | fail: 886 | { 887 | SDL_Event event; 888 | event.type = FF_QUIT_EVENT; 889 | event.user.data1 = is; 890 | SDL_PushEvent(&event); 891 | } 892 | return 0; 893 | } 894 | 895 | int main(int argc, char *argv[]) { 896 | 897 | SDL_Event event; 898 | 899 | VideoState *is; 900 | 901 | is = av_mallocz(sizeof(VideoState)); 902 | 903 | if(argc < 2) { 904 | fprintf(stderr, "Usage: test \n"); 905 | exit(1); 906 | } 907 | // Register all formats and codecs 908 | av_register_all(); 909 | 910 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 911 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 912 | exit(1); 913 | } 914 | 915 | // Make a screen to put our video 916 | #ifndef __DARWIN__ 917 | screen = SDL_SetVideoMode(640, 480, 0, 0); 918 | #else 919 | screen = SDL_SetVideoMode(640, 480, 24, 0); 920 | #endif 921 | if(!screen) { 922 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 923 | exit(1); 924 | } 925 | 926 | av_strlcpy(is->filename, argv[1], 1024); 927 | 928 | is->pictq_mutex = SDL_CreateMutex(); 929 | is->pictq_cond = SDL_CreateCond(); 930 | 931 | schedule_refresh(is, 40); 932 | 933 | is->av_sync_type = DEFAULT_AV_SYNC_TYPE; 934 | is->parse_tid = SDL_CreateThread(decode_thread, is); 935 | if(!is->parse_tid) { 936 | av_free(is); 937 | return -1; 938 | } 939 | for(;;) { 940 | 941 | SDL_WaitEvent(&event); 942 | switch(event.type) { 943 | case FF_QUIT_EVENT: 944 | case SDL_QUIT: 945 | is->quit = 1; 946 | /* 947 | * If the video has finished playing, then both the picture and 948 | * audio queues are waiting for more data. 
Make them stop 949 | * waiting and terminate normally. 950 | */ 951 | SDL_CondSignal(is->audioq.cond); 952 | SDL_CondSignal(is->videoq.cond); 953 | SDL_Quit(); 954 | exit(0); 955 | break; 956 | case FF_ALLOC_EVENT: 957 | alloc_picture(event.user.data1); 958 | break; 959 | case FF_REFRESH_EVENT: 960 | video_refresh_timer(event.user.data1); 961 | break; 962 | default: 963 | break; 964 | } 965 | } 966 | return 0; 967 | 968 | } 969 | -------------------------------------------------------------------------------- /tutorial07.c: -------------------------------------------------------------------------------- 1 | // tutorial07.c 2 | // A pedagogical video player that really works! Now with seeking features. 3 | // 4 | // This tutorial was written by Stephen Dranger (dranger@gmail.com). 5 | // 6 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 7 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 8 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 9 | // 10 | // Use the Makefile to build all the samples. 11 | // 12 | // Run using 13 | // tutorial07 myvideofile.mpg 14 | // 15 | // to play the video. 
16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #include 27 | #include 28 | #ifdef __MINGW32__ 29 | #undef main /* Prevents SDL from overriding main() */ 30 | #endif 31 | #include 32 | #include 33 | 34 | #define SDL_AUDIO_BUFFER_SIZE 1024 35 | #define MAX_AUDIO_FRAME_SIZE 192000 36 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 37 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 38 | #define AV_SYNC_THRESHOLD 0.01 39 | #define AV_NOSYNC_THRESHOLD 10.0 40 | #define SAMPLE_CORRECTION_PERCENT_MAX 10 41 | #define AUDIO_DIFF_AVG_NB 20 42 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 43 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 44 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 45 | #define VIDEO_PICTURE_QUEUE_SIZE 1 46 | #define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER 47 | 48 | typedef struct PacketQueue { 49 | AVPacketList *first_pkt, *last_pkt; 50 | int nb_packets; 51 | int size; 52 | SDL_mutex *mutex; 53 | SDL_cond *cond; 54 | } PacketQueue; 55 | typedef struct VideoPicture { 56 | SDL_Overlay *bmp; 57 | int width, height; /* source height & width */ 58 | int allocated; 59 | double pts; 60 | } VideoPicture; 61 | 62 | typedef struct VideoState { 63 | AVFormatContext *pFormatCtx; 64 | int videoStream, audioStream; 65 | 66 | int av_sync_type; 67 | double external_clock; /* external clock base */ 68 | int64_t external_clock_time; 69 | int seek_req; 70 | int seek_flags; 71 | int64_t seek_pos; 72 | 73 | double audio_clock; 74 | AVStream *audio_st; 75 | PacketQueue audioq; 76 | AVFrame audio_frame; 77 | uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2]; 78 | unsigned int audio_buf_size; 79 | unsigned int audio_buf_index; 80 | AVPacket audio_pkt; 81 | uint8_t *audio_pkt_data; 82 | int audio_pkt_size; 83 | int audio_hw_buf_size; 84 | double audio_diff_cum; /* used for AV difference average computation */ 85 | double audio_diff_avg_coef; 86 | double audio_diff_threshold; 87 | int audio_diff_avg_count; 88 | double 
frame_timer; 89 | double frame_last_pts; 90 | double frame_last_delay; 91 | double video_clock; ///mutex = SDL_CreateMutex(); 127 | q->cond = SDL_CreateCond(); 128 | } 129 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 130 | 131 | AVPacketList *pkt1; 132 | if(pkt != &flush_pkt && av_dup_packet(pkt) < 0) { 133 | return -1; 134 | } 135 | pkt1 = av_malloc(sizeof(AVPacketList)); 136 | if (!pkt1) 137 | return -1; 138 | pkt1->pkt = *pkt; 139 | pkt1->next = NULL; 140 | 141 | SDL_LockMutex(q->mutex); 142 | 143 | if (!q->last_pkt) 144 | q->first_pkt = pkt1; 145 | else 146 | q->last_pkt->next = pkt1; 147 | q->last_pkt = pkt1; 148 | q->nb_packets++; 149 | q->size += pkt1->pkt.size; 150 | SDL_CondSignal(q->cond); 151 | 152 | SDL_UnlockMutex(q->mutex); 153 | return 0; 154 | } 155 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 156 | { 157 | AVPacketList *pkt1; 158 | int ret; 159 | 160 | SDL_LockMutex(q->mutex); 161 | 162 | for(;;) { 163 | 164 | if(global_video_state->quit) { 165 | ret = -1; 166 | break; 167 | } 168 | 169 | pkt1 = q->first_pkt; 170 | if (pkt1) { 171 | q->first_pkt = pkt1->next; 172 | if (!q->first_pkt) 173 | q->last_pkt = NULL; 174 | q->nb_packets--; 175 | q->size -= pkt1->pkt.size; 176 | *pkt = pkt1->pkt; 177 | av_free(pkt1); 178 | ret = 1; 179 | break; 180 | } else if (!block) { 181 | ret = 0; 182 | break; 183 | } else { 184 | SDL_CondWait(q->cond, q->mutex); 185 | } 186 | } 187 | SDL_UnlockMutex(q->mutex); 188 | return ret; 189 | } 190 | static void packet_queue_flush(PacketQueue *q) { 191 | AVPacketList *pkt, *pkt1; 192 | 193 | SDL_LockMutex(q->mutex); 194 | for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) { 195 | pkt1 = pkt->next; 196 | av_free_packet(&pkt->pkt); 197 | av_freep(&pkt); 198 | } 199 | q->last_pkt = NULL; 200 | q->first_pkt = NULL; 201 | q->nb_packets = 0; 202 | q->size = 0; 203 | SDL_UnlockMutex(q->mutex); 204 | } 205 | double get_audio_clock(VideoState *is) { 206 | double pts; 207 | int hw_buf_size, 
bytes_per_sec, n; 208 | 209 | pts = is->audio_clock; /* maintained in the audio thread */ 210 | hw_buf_size = is->audio_buf_size - is->audio_buf_index; 211 | bytes_per_sec = 0; 212 | n = is->audio_st->codec->channels * 2; 213 | if(is->audio_st) { 214 | bytes_per_sec = is->audio_st->codec->sample_rate * n; 215 | } 216 | if(bytes_per_sec) { 217 | pts -= (double)hw_buf_size / bytes_per_sec; 218 | } 219 | return pts; 220 | } 221 | double get_video_clock(VideoState *is) { 222 | double delta; 223 | 224 | delta = (av_gettime() - is->video_current_pts_time) / 1000000.0; 225 | return is->video_current_pts + delta; 226 | } 227 | double get_external_clock(VideoState *is) { 228 | return av_gettime() / 1000000.0; 229 | } 230 | double get_master_clock(VideoState *is) { 231 | if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) { 232 | return get_video_clock(is); 233 | } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) { 234 | return get_audio_clock(is); 235 | } else { 236 | return get_external_clock(is); 237 | } 238 | } 239 | /* Add or subtract samples to get a better sync, return new 240 | audio buffer size */ 241 | int synchronize_audio(VideoState *is, short *samples, 242 | int samples_size, double pts) { 243 | int n; 244 | double ref_clock; 245 | 246 | n = 2 * is->audio_st->codec->channels; 247 | 248 | if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) { 249 | double diff, avg_diff; 250 | int wanted_size, min_size, max_size /*, nb_samples */; 251 | 252 | ref_clock = get_master_clock(is); 253 | diff = get_audio_clock(is) - ref_clock; 254 | 255 | if(diff < AV_NOSYNC_THRESHOLD) { 256 | // accumulate the diffs 257 | is->audio_diff_cum = diff + is->audio_diff_avg_coef 258 | * is->audio_diff_cum; 259 | if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { 260 | is->audio_diff_avg_count++; 261 | } else { 262 | avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef); 263 | if(fabs(avg_diff) >= is->audio_diff_threshold) { 264 | wanted_size = samples_size + ((int)(diff * 
is->audio_st->codec->sample_rate) * n); 265 | min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100); 266 | max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100); 267 | if(wanted_size < min_size) { 268 | wanted_size = min_size; 269 | } else if (wanted_size > max_size) { 270 | wanted_size = max_size; 271 | } 272 | if(wanted_size < samples_size) { 273 | /* remove samples */ 274 | samples_size = wanted_size; 275 | } else if(wanted_size > samples_size) { 276 | uint8_t *samples_end, *q; 277 | int nb; 278 | 279 | /* add samples by copying final sample*/ 280 | nb = (samples_size - wanted_size); 281 | samples_end = (uint8_t *)samples + samples_size - n; 282 | q = samples_end + n; 283 | while(nb > 0) { 284 | memcpy(q, samples_end, n); 285 | q += n; 286 | nb -= n; 287 | } 288 | samples_size = wanted_size; 289 | } 290 | } 291 | } 292 | } else { 293 | /* difference is TOO big; reset diff stuff */ 294 | is->audio_diff_avg_count = 0; 295 | is->audio_diff_cum = 0; 296 | } 297 | } 298 | return samples_size; 299 | } 300 | 301 | int decode_frame_from_packet(VideoState *is, AVFrame decoded_frame) 302 | { 303 | int64_t src_ch_layout, dst_ch_layout; 304 | int src_rate, dst_rate; 305 | uint8_t **src_data = NULL, **dst_data = NULL; 306 | int src_nb_channels = 0, dst_nb_channels = 0; 307 | int src_linesize, dst_linesize; 308 | int src_nb_samples, dst_nb_samples, max_dst_nb_samples; 309 | enum AVSampleFormat src_sample_fmt, dst_sample_fmt; 310 | int dst_bufsize; 311 | int ret; 312 | 313 | src_nb_samples = decoded_frame.nb_samples; 314 | src_linesize = (int) decoded_frame.linesize; 315 | src_data = decoded_frame.data; 316 | 317 | if (decoded_frame.channel_layout == 0) { 318 | decoded_frame.channel_layout = av_get_default_channel_layout(decoded_frame.channels); 319 | } 320 | 321 | src_rate = decoded_frame.sample_rate; 322 | dst_rate = decoded_frame.sample_rate; 323 | src_ch_layout = decoded_frame.channel_layout; 324 | dst_ch_layout = 
decoded_frame.channel_layout; 325 | src_sample_fmt = decoded_frame.format; 326 | dst_sample_fmt = AV_SAMPLE_FMT_S16; 327 | 328 | av_opt_set_int(is->sws_ctx_audio, "in_channel_layout", src_ch_layout, 0); 329 | av_opt_set_int(is->sws_ctx_audio, "out_channel_layout", dst_ch_layout, 0); 330 | av_opt_set_int(is->sws_ctx_audio, "in_sample_rate", src_rate, 0); 331 | av_opt_set_int(is->sws_ctx_audio, "out_sample_rate", dst_rate, 0); 332 | av_opt_set_sample_fmt(is->sws_ctx_audio, "in_sample_fmt", src_sample_fmt, 0); 333 | av_opt_set_sample_fmt(is->sws_ctx_audio, "out_sample_fmt", dst_sample_fmt, 0); 334 | 335 | /* initialize the resampling context */ 336 | if ((ret = swr_init(is->sws_ctx_audio)) < 0) { 337 | fprintf(stderr, "Failed to initialize the resampling context\n"); 338 | return -1; 339 | } 340 | 341 | /* allocate source and destination samples buffers */ 342 | src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout); 343 | ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels, src_nb_samples, src_sample_fmt, 0); 344 | if (ret < 0) { 345 | fprintf(stderr, "Could not allocate source samples\n"); 346 | return -1; 347 | } 348 | 349 | /* compute the number of converted samples: buffering is avoided 350 | * ensuring that the output buffer will contain at least all the 351 | * converted input samples */ 352 | max_dst_nb_samples = dst_nb_samples = av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); 353 | 354 | /* buffer is going to be directly written to a rawaudio file, no alignment */ 355 | dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout); 356 | ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels, dst_nb_samples, dst_sample_fmt, 0); 357 | if (ret < 0) { 358 | fprintf(stderr, "Could not allocate destination samples\n"); 359 | return -1; 360 | } 361 | 362 | /* compute destination number of samples */ 363 | dst_nb_samples = av_rescale_rnd(swr_get_delay(is->sws_ctx_audio, 
src_rate) + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); 364 | 365 | /* convert to destination format */ 366 | ret = swr_convert(is->sws_ctx_audio, dst_data, dst_nb_samples, (const uint8_t **)decoded_frame.data, src_nb_samples); 367 | if (ret < 0) { 368 | fprintf(stderr, "Error while converting\n"); 369 | return -1; 370 | } 371 | 372 | dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, ret, dst_sample_fmt, 1); 373 | if (dst_bufsize < 0) { 374 | fprintf(stderr, "Could not get sample buffer size\n"); 375 | return -1; 376 | } 377 | 378 | memcpy(is->audio_buf, dst_data[0], dst_bufsize); 379 | 380 | if (src_data) { 381 | av_freep(&src_data[0]); 382 | } 383 | av_freep(&src_data); 384 | 385 | if (dst_data) { 386 | av_freep(&dst_data[0]); 387 | } 388 | av_freep(&dst_data); 389 | 390 | return dst_bufsize; 391 | } 392 | 393 | int audio_decode_frame(VideoState *is, double *pts_ptr) { 394 | 395 | int len1, data_size = 0, n; 396 | AVPacket *pkt = &is->audio_pkt; 397 | double pts; 398 | 399 | for(;;) { 400 | while(is->audio_pkt_size > 0) { 401 | int got_frame = 0; 402 | len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt); 403 | if(len1 < 0) { 404 | /* if error, skip frame */ 405 | is->audio_pkt_size = 0; 406 | break; 407 | } 408 | if (got_frame) 409 | { 410 | if (is->audio_frame.format != AV_SAMPLE_FMT_S16) { 411 | data_size = decode_frame_from_packet(is, is->audio_frame); 412 | } else { 413 | data_size = 414 | av_samples_get_buffer_size 415 | ( 416 | NULL, 417 | is->audio_st->codec->channels, 418 | is->audio_frame.nb_samples, 419 | is->audio_st->codec->sample_fmt, 420 | 1 421 | ); 422 | memcpy(is->audio_buf, is->audio_frame.data[0], data_size); 423 | } 424 | } 425 | is->audio_pkt_data += len1; 426 | is->audio_pkt_size -= len1; 427 | if(data_size <= 0) { 428 | /* No data yet, get more frames */ 429 | continue; 430 | } 431 | pts = is->audio_clock; 432 | *pts_ptr = pts; 433 | n = 2 * is->audio_st->codec->channels; 434 | 
is->audio_clock += (double)data_size / 435 | (double)(n * is->audio_st->codec->sample_rate); 436 | 437 | /* We have data, return it and come back for more later */ 438 | return data_size; 439 | } 440 | if(pkt->data) 441 | av_free_packet(pkt); 442 | 443 | if(is->quit) { 444 | return -1; 445 | } 446 | /* next packet */ 447 | if(packet_queue_get(&is->audioq, pkt, 1) < 0) { 448 | return -1; 449 | } 450 | if(pkt->data == flush_pkt.data) { 451 | avcodec_flush_buffers(is->audio_st->codec); 452 | continue; 453 | } 454 | is->audio_pkt_data = pkt->data; 455 | is->audio_pkt_size = pkt->size; 456 | /* if update, update the audio clock w/pts */ 457 | if(pkt->pts != AV_NOPTS_VALUE) { 458 | is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts; 459 | } 460 | } 461 | } 462 | 463 | void audio_callback(void *userdata, Uint8 *stream, int len) { 464 | 465 | VideoState *is = (VideoState *)userdata; 466 | int len1, audio_size; 467 | double pts; 468 | 469 | while(len > 0) { 470 | if(is->audio_buf_index >= is->audio_buf_size) { 471 | /* We have already sent all our data; get more */ 472 | audio_size = audio_decode_frame(is, &pts); 473 | if(audio_size < 0) { 474 | /* If error, output silence */ 475 | is->audio_buf_size = 1024; 476 | memset(is->audio_buf, 0, is->audio_buf_size); 477 | } else { 478 | audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, 479 | audio_size, pts); 480 | is->audio_buf_size = audio_size; 481 | } 482 | is->audio_buf_index = 0; 483 | } 484 | len1 = is->audio_buf_size - is->audio_buf_index; 485 | if(len1 > len) 486 | len1 = len; 487 | memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); 488 | len -= len1; 489 | stream += len1; 490 | is->audio_buf_index += len1; 491 | } 492 | } 493 | 494 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 495 | SDL_Event event; 496 | event.type = FF_REFRESH_EVENT; 497 | event.user.data1 = opaque; 498 | SDL_PushEvent(&event); 499 | return 0; /* 0 means stop timer */ 500 | } 501 | 502 | /* 
schedule a video refresh in 'delay' ms */ 503 | static void schedule_refresh(VideoState *is, int delay) { 504 | SDL_AddTimer(delay, sdl_refresh_timer_cb, is); 505 | } 506 | 507 | void video_display(VideoState *is) { 508 | 509 | SDL_Rect rect; 510 | VideoPicture *vp; 511 | //AVPicture pict; 512 | float aspect_ratio; 513 | int w, h, x, y; 514 | //int i; 515 | 516 | vp = &is->pictq[is->pictq_rindex]; 517 | if(vp->bmp) { 518 | if(is->video_st->codec->sample_aspect_ratio.num == 0) { 519 | aspect_ratio = 0; 520 | } else { 521 | aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) * 522 | is->video_st->codec->width / is->video_st->codec->height; 523 | } 524 | if(aspect_ratio <= 0.0) { 525 | aspect_ratio = (float)is->video_st->codec->width / 526 | (float)is->video_st->codec->height; 527 | } 528 | h = screen->h; 529 | w = ((int)rint(h * aspect_ratio)) & -3; 530 | if(w > screen->w) { 531 | w = screen->w; 532 | h = ((int)rint(w / aspect_ratio)) & -3; 533 | } 534 | x = (screen->w - w) / 2; 535 | y = (screen->h - h) / 2; 536 | 537 | rect.x = x; 538 | rect.y = y; 539 | rect.w = w; 540 | rect.h = h; 541 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 542 | } 543 | } 544 | 545 | void video_refresh_timer(void *userdata) { 546 | 547 | VideoState *is = (VideoState *)userdata; 548 | VideoPicture *vp; 549 | double actual_delay, delay, sync_threshold, ref_clock, diff; 550 | 551 | if(is->video_st) { 552 | if(is->pictq_size == 0) { 553 | schedule_refresh(is, 1); 554 | } else { 555 | vp = &is->pictq[is->pictq_rindex]; 556 | 557 | is->video_current_pts = vp->pts; 558 | is->video_current_pts_time = av_gettime(); 559 | 560 | delay = vp->pts - is->frame_last_pts; /* the pts from last time */ 561 | if(delay <= 0 || delay >= 1.0) { 562 | /* if incorrect delay, use previous one */ 563 | delay = is->frame_last_delay; 564 | } 565 | /* save for next time */ 566 | is->frame_last_delay = delay; 567 | is->frame_last_pts = vp->pts; 568 | 569 | /* update delay to sync to audio if not master source */ 
570 | if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) { 571 | ref_clock = get_master_clock(is); 572 | diff = vp->pts - ref_clock; 573 | 574 | /* Skip or repeat the frame. Take delay into account 575 | FFPlay still doesn't "know if this is the best guess." */ 576 | sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD; 577 | if(fabs(diff) < AV_NOSYNC_THRESHOLD) { 578 | if(diff <= -sync_threshold) { 579 | delay = 0; 580 | } else if(diff >= sync_threshold) { 581 | delay = 2 * delay; 582 | } 583 | } 584 | } 585 | 586 | is->frame_timer += delay; 587 | /* computer the REAL delay */ 588 | actual_delay = is->frame_timer - (av_gettime() / 1000000.0); 589 | if(actual_delay < 0.010) { 590 | /* Really it should skip the picture instead */ 591 | actual_delay = 0.010; 592 | } 593 | schedule_refresh(is, (int)(actual_delay * 1000 + 0.5)); 594 | 595 | /* show the picture! */ 596 | video_display(is); 597 | 598 | /* update queue for next picture! */ 599 | if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 600 | is->pictq_rindex = 0; 601 | } 602 | SDL_LockMutex(is->pictq_mutex); 603 | is->pictq_size--; 604 | SDL_CondSignal(is->pictq_cond); 605 | SDL_UnlockMutex(is->pictq_mutex); 606 | } 607 | } else { 608 | schedule_refresh(is, 100); 609 | } 610 | } 611 | 612 | void alloc_picture(void *userdata) { 613 | 614 | VideoState *is = (VideoState *)userdata; 615 | VideoPicture *vp; 616 | 617 | vp = &is->pictq[is->pictq_windex]; 618 | if(vp->bmp) { 619 | // we already have one make another, bigger/smaller 620 | SDL_FreeYUVOverlay(vp->bmp); 621 | } 622 | // Allocate a place to put our YUV image on that screen 623 | vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, 624 | is->video_st->codec->height, 625 | SDL_YV12_OVERLAY, 626 | screen); 627 | vp->width = is->video_st->codec->width; 628 | vp->height = is->video_st->codec->height; 629 | 630 | SDL_LockMutex(is->pictq_mutex); 631 | vp->allocated = 1; 632 | SDL_CondSignal(is->pictq_cond); 633 | 
SDL_UnlockMutex(is->pictq_mutex); 634 | 635 | } 636 | 637 | int queue_picture(VideoState *is, AVFrame *pFrame, double pts) { 638 | 639 | VideoPicture *vp; 640 | //int dst_pix_fmt; 641 | AVPicture pict; 642 | 643 | /* wait until we have space for a new pic */ 644 | SDL_LockMutex(is->pictq_mutex); 645 | while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 646 | !is->quit) { 647 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 648 | } 649 | SDL_UnlockMutex(is->pictq_mutex); 650 | 651 | if(is->quit) 652 | return -1; 653 | 654 | // windex is set to 0 initially 655 | vp = &is->pictq[is->pictq_windex]; 656 | 657 | /* allocate or resize the buffer! */ 658 | if(!vp->bmp || 659 | vp->width != is->video_st->codec->width || 660 | vp->height != is->video_st->codec->height) { 661 | SDL_Event event; 662 | 663 | vp->allocated = 0; 664 | /* we have to do it in the main thread */ 665 | event.type = FF_ALLOC_EVENT; 666 | event.user.data1 = is; 667 | SDL_PushEvent(&event); 668 | 669 | /* wait until we have a picture allocated */ 670 | SDL_LockMutex(is->pictq_mutex); 671 | while(!vp->allocated && !is->quit) { 672 | SDL_CondWait(is->pictq_cond, is->pictq_mutex); 673 | } 674 | SDL_UnlockMutex(is->pictq_mutex); 675 | if(is->quit) { 676 | return -1; 677 | } 678 | } 679 | /* We have a place to put our picture on the queue */ 680 | /* If we are skipping a frame, do we set this to null 681 | but still return vp->allocated = 1? 
*/ 682 | 683 | 684 | if(vp->bmp) { 685 | 686 | SDL_LockYUVOverlay(vp->bmp); 687 | 688 | //dst_pix_fmt = PIX_FMT_YUV420P; 689 | /* point pict at the queue */ 690 | 691 | pict.data[0] = vp->bmp->pixels[0]; 692 | pict.data[1] = vp->bmp->pixels[2]; 693 | pict.data[2] = vp->bmp->pixels[1]; 694 | 695 | pict.linesize[0] = vp->bmp->pitches[0]; 696 | pict.linesize[1] = vp->bmp->pitches[2]; 697 | pict.linesize[2] = vp->bmp->pitches[1]; 698 | 699 | // Convert the image into YUV format that SDL uses 700 | sws_scale 701 | ( 702 | is->sws_ctx, 703 | (uint8_t const * const *)pFrame->data, 704 | pFrame->linesize, 705 | 0, 706 | is->video_st->codec->height, 707 | pict.data, 708 | pict.linesize 709 | ); 710 | 711 | SDL_UnlockYUVOverlay(vp->bmp); 712 | vp->pts = pts; 713 | 714 | /* now we inform our display thread that we have a pic ready */ 715 | if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 716 | is->pictq_windex = 0; 717 | } 718 | SDL_LockMutex(is->pictq_mutex); 719 | is->pictq_size++; 720 | SDL_UnlockMutex(is->pictq_mutex); 721 | } 722 | return 0; 723 | } 724 | 725 | double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) { 726 | 727 | double frame_delay; 728 | 729 | if(pts != 0) { 730 | /* if we have pts, set video clock to it */ 731 | is->video_clock = pts; 732 | } else { 733 | /* if we aren't given a pts, set it to the clock */ 734 | pts = is->video_clock; 735 | } 736 | /* update the video clock */ 737 | frame_delay = av_q2d(is->video_st->codec->time_base); 738 | /* if we are repeating a frame, adjust clock accordingly */ 739 | frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); 740 | is->video_clock += frame_delay; 741 | return pts; 742 | } 743 | 744 | uint64_t global_video_pkt_pts = AV_NOPTS_VALUE; 745 | 746 | /* These are called whenever we allocate a frame 747 | * buffer. We use this to store the global_pts in 748 | * a frame at the time it is allocated. 
749 | */ 750 | int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) { 751 | int ret = avcodec_default_get_buffer(c, pic); 752 | uint64_t *pts = av_malloc(sizeof(uint64_t)); 753 | *pts = global_video_pkt_pts; 754 | pic->opaque = pts; 755 | return ret; 756 | } 757 | void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { 758 | if(pic) av_freep(&pic->opaque); 759 | avcodec_default_release_buffer(c, pic); 760 | } 761 | 762 | int video_thread(void *arg) { 763 | VideoState *is = (VideoState *)arg; 764 | AVPacket pkt1, *packet = &pkt1; 765 | int frameFinished; 766 | AVFrame *pFrame; 767 | double pts; 768 | 769 | pFrame = av_frame_alloc(); 770 | 771 | for(;;) { 772 | if(packet_queue_get(&is->videoq, packet, 1) < 0) { 773 | // means we quit getting packets 774 | break; 775 | } 776 | if(packet->data == flush_pkt.data) { 777 | avcodec_flush_buffers(is->video_st->codec); 778 | continue; 779 | } 780 | pts = 0; 781 | 782 | // Save global pts to be stored in pFrame in first call 783 | global_video_pkt_pts = packet->pts; 784 | // Decode video frame 785 | avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, 786 | packet); 787 | if(packet->dts == AV_NOPTS_VALUE 788 | && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { 789 | pts = *(uint64_t *)pFrame->opaque; 790 | } else if(packet->dts != AV_NOPTS_VALUE) { 791 | pts = packet->dts; 792 | } else { 793 | pts = 0; 794 | } 795 | pts *= av_q2d(is->video_st->time_base); 796 | 797 | // Did we get a video frame? 
798 | if(frameFinished) { 799 | pts = synchronize_video(is, pFrame, pts); 800 | if(queue_picture(is, pFrame, pts) < 0) { 801 | break; 802 | } 803 | } 804 | av_free_packet(packet); 805 | } 806 | av_free(pFrame); 807 | return 0; 808 | } 809 | int stream_component_open(VideoState *is, int stream_index) { 810 | 811 | AVFormatContext *pFormatCtx = is->pFormatCtx; 812 | AVCodecContext *codecCtx = NULL; 813 | AVCodec *codec = NULL; 814 | AVDictionary *optionsDict = NULL; 815 | SDL_AudioSpec wanted_spec, spec; 816 | 817 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 818 | return -1; 819 | } 820 | 821 | // Get a pointer to the codec context for the video stream 822 | codecCtx = pFormatCtx->streams[stream_index]->codec; 823 | 824 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 825 | // Set audio settings from codec info 826 | wanted_spec.freq = codecCtx->sample_rate; 827 | wanted_spec.format = AUDIO_S16SYS; 828 | wanted_spec.channels = codecCtx->channels; 829 | wanted_spec.silence = 0; 830 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 831 | wanted_spec.callback = audio_callback; 832 | wanted_spec.userdata = is; 833 | 834 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 835 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 836 | return -1; 837 | } 838 | is->audio_hw_buf_size = spec.size; 839 | } 840 | codec = avcodec_find_decoder(codecCtx->codec_id); 841 | if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) { 842 | fprintf(stderr, "Unsupported codec!\n"); 843 | return -1; 844 | } 845 | 846 | switch(codecCtx->codec_type) { 847 | case AVMEDIA_TYPE_AUDIO: 848 | is->audioStream = stream_index; 849 | is->audio_st = pFormatCtx->streams[stream_index]; 850 | is->audio_buf_size = 0; 851 | is->audio_buf_index = 0; 852 | 853 | /* averaging filter for audio sync */ 854 | is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB)); 855 | is->audio_diff_avg_count = 0; 856 | /* Correct audio only if larger error than this */ 857 | 
is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate; 858 | 859 | is->sws_ctx_audio = swr_alloc(); 860 | if (!is->sws_ctx_audio) { 861 | fprintf(stderr, "Could not allocate resampler context\n"); 862 | return -1; 863 | } 864 | 865 | memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); 866 | packet_queue_init(&is->audioq); 867 | SDL_PauseAudio(0); 868 | break; 869 | case AVMEDIA_TYPE_VIDEO: 870 | is->videoStream = stream_index; 871 | is->video_st = pFormatCtx->streams[stream_index]; 872 | 873 | is->frame_timer = (double)av_gettime() / 1000000.0; 874 | is->frame_last_delay = 40e-3; 875 | is->video_current_pts_time = av_gettime(); 876 | 877 | packet_queue_init(&is->videoq); 878 | is->video_tid = SDL_CreateThread(video_thread, is); 879 | is->sws_ctx = 880 | sws_getContext 881 | ( 882 | is->video_st->codec->width, 883 | is->video_st->codec->height, 884 | is->video_st->codec->pix_fmt, 885 | is->video_st->codec->width, 886 | is->video_st->codec->height, 887 | AV_PIX_FMT_YUV420P, 888 | SWS_BILINEAR, 889 | NULL, 890 | NULL, 891 | NULL 892 | ); 893 | codecCtx->get_buffer2 = our_get_buffer; 894 | codecCtx->release_buffer = our_release_buffer; 895 | 896 | break; 897 | default: 898 | break; 899 | } 900 | 901 | return 0; 902 | } 903 | 904 | int decode_interrupt_cb(void *opaque) { 905 | return (global_video_state && global_video_state->quit); 906 | } 907 | int decode_thread(void *arg) { 908 | 909 | VideoState *is = (VideoState *)arg; 910 | AVFormatContext *pFormatCtx = NULL; 911 | AVPacket pkt1, *packet = &pkt1; 912 | 913 | AVDictionary *io_dict = NULL; 914 | AVIOInterruptCB callback; 915 | 916 | int video_index = -1; 917 | int audio_index = -1; 918 | int i; 919 | 920 | is->videoStream=-1; 921 | is->audioStream=-1; 922 | 923 | global_video_state = is; 924 | // will interrupt blocking functions if we quit! 
925 | callback.callback = decode_interrupt_cb; 926 | callback.opaque = is; 927 | if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) 928 | { 929 | fprintf(stderr, "Unable to open I/O for %s\n", is->filename); 930 | return -1; 931 | } 932 | 933 | // Open video file 934 | if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0) 935 | return -1; // Couldn't open file 936 | 937 | is->pFormatCtx = pFormatCtx; 938 | 939 | // Retrieve stream information 940 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 941 | return -1; // Couldn't find stream information 942 | 943 | // Dump information about file onto standard error 944 | av_dump_format(pFormatCtx, 0, is->filename, 0); 945 | 946 | // Find the first video stream 947 | for(i=0; inb_streams; i++) { 948 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 949 | video_index < 0) { 950 | video_index=i; 951 | } 952 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 953 | audio_index < 0) { 954 | audio_index=i; 955 | } 956 | } 957 | if(audio_index >= 0) { 958 | stream_component_open(is, audio_index); 959 | } 960 | if(video_index >= 0) { 961 | stream_component_open(is, video_index); 962 | } 963 | 964 | if(is->videoStream < 0 || is->audioStream < 0) { 965 | fprintf(stderr, "%s: could not open codecs\n", is->filename); 966 | goto fail; 967 | } 968 | 969 | // main decode loop 970 | 971 | for(;;) { 972 | if(is->quit) { 973 | break; 974 | } 975 | // seek stuff goes here 976 | if(is->seek_req) { 977 | int stream_index= -1; 978 | int64_t seek_target = is->seek_pos; 979 | 980 | if (is->videoStream >= 0) stream_index = is->videoStream; 981 | else if(is->audioStream >= 0) stream_index = is->audioStream; 982 | 983 | if(stream_index>=0){ 984 | seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base); 985 | } 986 | if(av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0) { 987 | fprintf(stderr, "%s: error 
while seeking\n", is->pFormatCtx->filename); 988 | } else { 989 | if(is->audioStream >= 0) { 990 | packet_queue_flush(&is->audioq); 991 | packet_queue_put(&is->audioq, &flush_pkt); 992 | } 993 | if(is->videoStream >= 0) { 994 | packet_queue_flush(&is->videoq); 995 | packet_queue_put(&is->videoq, &flush_pkt); 996 | } 997 | } 998 | is->seek_req = 0; 999 | } 1000 | 1001 | if(is->audioq.size > MAX_AUDIOQ_SIZE || 1002 | is->videoq.size > MAX_VIDEOQ_SIZE) { 1003 | SDL_Delay(10); 1004 | continue; 1005 | } 1006 | if(av_read_frame(is->pFormatCtx, packet) < 0) { 1007 | if(is->pFormatCtx->pb->error == 0) { 1008 | SDL_Delay(100); /* no error; wait for user input */ 1009 | continue; 1010 | } else { 1011 | break; 1012 | } 1013 | } 1014 | // Is this a packet from the video stream? 1015 | if(packet->stream_index == is->videoStream) { 1016 | packet_queue_put(&is->videoq, packet); 1017 | } else if(packet->stream_index == is->audioStream) { 1018 | packet_queue_put(&is->audioq, packet); 1019 | } else { 1020 | av_free_packet(packet); 1021 | } 1022 | } 1023 | /* all done - wait for it */ 1024 | while(!is->quit) { 1025 | SDL_Delay(100); 1026 | } 1027 | fail: 1028 | { 1029 | SDL_Event event; 1030 | event.type = FF_QUIT_EVENT; 1031 | event.user.data1 = is; 1032 | SDL_PushEvent(&event); 1033 | } 1034 | return 0; 1035 | } 1036 | 1037 | void stream_seek(VideoState *is, int64_t pos, int rel) { 1038 | 1039 | if(!is->seek_req) { 1040 | is->seek_pos = pos; 1041 | is->seek_flags = rel < 0 ? 
AVSEEK_FLAG_BACKWARD : 0; 1042 | is->seek_req = 1; 1043 | } 1044 | } 1045 | int main(int argc, char *argv[]) { 1046 | //int main(void) { 1047 | 1048 | SDL_Event event; 1049 | //double pts; 1050 | VideoState *is; 1051 | 1052 | is = av_mallocz(sizeof(VideoState)); 1053 | 1054 | if(argc < 2) { 1055 | fprintf(stderr, "Usage: test \n"); 1056 | exit(1); 1057 | } 1058 | // Register all formats and codecs 1059 | av_register_all(); 1060 | 1061 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 1062 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 1063 | exit(1); 1064 | } 1065 | 1066 | // Make a screen to put our video 1067 | #ifndef __DARWIN__ 1068 | screen = SDL_SetVideoMode(640, 480, 0, 0); 1069 | #else 1070 | screen = SDL_SetVideoMode(640, 480, 24, 0); 1071 | #endif 1072 | if(!screen) { 1073 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 1074 | exit(1); 1075 | } 1076 | 1077 | av_strlcpy(is->filename, argv[1], 1024); 1078 | 1079 | is->pictq_mutex = SDL_CreateMutex(); 1080 | is->pictq_cond = SDL_CreateCond(); 1081 | 1082 | schedule_refresh(is, 40); 1083 | 1084 | is->av_sync_type = DEFAULT_AV_SYNC_TYPE; 1085 | is->parse_tid = SDL_CreateThread(decode_thread, is); 1086 | if(!is->parse_tid) { 1087 | av_free(is); 1088 | return -1; 1089 | } 1090 | 1091 | av_init_packet(&flush_pkt); 1092 | flush_pkt.data = (unsigned char *)"FLUSH"; 1093 | 1094 | for(;;) { 1095 | double incr, pos; 1096 | SDL_WaitEvent(&event); 1097 | switch(event.type) { 1098 | case SDL_KEYDOWN: 1099 | switch(event.key.keysym.sym) { 1100 | case SDLK_LEFT: 1101 | incr = -10.0; 1102 | goto do_seek; 1103 | case SDLK_RIGHT: 1104 | incr = 10.0; 1105 | goto do_seek; 1106 | case SDLK_UP: 1107 | incr = 60.0; 1108 | goto do_seek; 1109 | case SDLK_DOWN: 1110 | incr = -60.0; 1111 | goto do_seek; 1112 | do_seek: 1113 | if(global_video_state) { 1114 | pos = get_master_clock(global_video_state); 1115 | pos += incr; 1116 | stream_seek(global_video_state, (int64_t)(pos * 
AV_TIME_BASE), incr); 1117 | } 1118 | break; 1119 | default: 1120 | break; 1121 | } 1122 | break; 1123 | case FF_QUIT_EVENT: 1124 | case SDL_QUIT: 1125 | is->quit = 1; 1126 | /* 1127 | * If the video has finished playing, then both the picture and 1128 | * audio queues are waiting for more data. Make them stop 1129 | * waiting and terminate normally. 1130 | */ 1131 | SDL_CondSignal(is->audioq.cond); 1132 | SDL_CondSignal(is->videoq.cond); 1133 | SDL_Quit(); 1134 | exit(0); 1135 | break; 1136 | case FF_ALLOC_EVENT: 1137 | alloc_picture(event.user.data1); 1138 | break; 1139 | case FF_REFRESH_EVENT: 1140 | video_refresh_timer(event.user.data1); 1141 | break; 1142 | default: 1143 | break; 1144 | } 1145 | } 1146 | return 0; 1147 | } 1148 | --------------------------------------------------------------------------------