├── .gitignore ├── Makefile ├── README.md ├── tutorial01.c ├── tutorial02.c ├── tutorial03.c ├── tutorial04.c ├── tutorial05.c ├── tutorial06.c └── tutorial07.c /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | 6 | # Compiled Dynamic libraries 7 | *.so 8 | 9 | # Compiled Static libraries 10 | *.lai 11 | *.la 12 | *.a 13 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # use pkg-config for getting CFLAGS and LDLIBS 2 | FFMPEG_LIBS= libavdevice \ 3 | libavformat \ 4 | libavfilter \ 5 | libavcodec \ 6 | libswresample \ 7 | libswscale \ 8 | libavutil \ 9 | 10 | CFLAGS += -Wall -O2 -g 11 | CFLAGS := $(shell /usr/local/bin/pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS) 12 | CFLAGS := $(shell /usr/X11/bin/freetype-config --cflags) $(CFLAGS) 13 | CFLAGS := $(shell /usr/local/bin/sdl-config --cflags) $(CFLAGS) 14 | 15 | LDLIBS := $(shell /usr/local/bin/pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS) 16 | LDLIBS := $(shell /usr/X11/bin/freetype-config --libs) $(LDLIBS) 17 | LDLIBS := $(shell /usr/local/bin/sdl-config --libs) $(LDLIBS) 18 | 19 | EXAMPLES= tutorial01 \ 20 | tutorial02 \ 21 | tutorial03 \ 22 | tutorial04 \ 23 | tutorial05 \ 24 | tutorial06 \ 25 | tutorial07 26 | 27 | OBJS=$(addsuffix .o,$(EXAMPLES)) 28 | 29 | # the following examples make explicit use of the math library 30 | decoding_encoding: LDLIBS += -lm 31 | muxing: LDLIBS += -lm 32 | 33 | .phony: all clean-test clean 34 | 35 | all: $(OBJS) $(EXAMPLES) 36 | 37 | clean-test: 38 | $(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg 39 | 40 | clean: clean-test 41 | $(RM) $(EXAMPLES) $(OBJS) 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 
FFmpeg-tutorial-samples 2 | ======================= 3 | 4 | An updated version of the source code from http://dranger.com/ffmpeg/ffmpeg.html 5 | 6 | - Tested with Mac OS X 10.7.5 and Xcode 4.5 7 | - Tutorial08's code, software scaling, is used throughout all the examples, so there is no code for tutorial08 -------------------------------------------------------------------------------- /tutorial01.c: -------------------------------------------------------------------------------- 1 | // tutorial01.c 2 | // Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 3 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 4 | 5 | // A small sample program that shows how to use libavformat and libavcodec to 6 | // read video from a file. 7 | // 8 | // Use 9 | // 10 | // gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lz 11 | // 12 | // to build (assuming libavformat and libavcodec are correctly installed 13 | // on your system). 14 | // 15 | // Run using 16 | // 17 | // tutorial01 myvideofile.mpg 18 | // 19 | // to write the first five frames from "myvideofile.mpg" to disk in PPM 20 | // format. 
21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) { 28 | FILE *pFile; 29 | char szFilename[32]; 30 | int y; 31 | 32 | // Open file 33 | sprintf(szFilename, "frame%d.ppm", iFrame); 34 | pFile=fopen(szFilename, "wb"); 35 | if(pFile==NULL) 36 | return; 37 | 38 | // Write header 39 | fprintf(pFile, "P6\n%d %d\n255\n", width, height); 40 | 41 | // Write pixel data 42 | for(y=0; ydata[0]+y*pFrame->linesize[0], 1, width*3, pFile); 44 | 45 | // Close file 46 | fclose(pFile); 47 | } 48 | 49 | #pragma mark - Main function 50 | int main(int argc, char *argv[]) { 51 | AVFormatContext *pFormatCtx; 52 | int i, videoStreamIdx; 53 | AVCodecContext *pCodecCtx; 54 | AVCodec *pCodec; 55 | AVFrame *pFrame; 56 | AVFrame *pFrameRGB; 57 | AVPacket packet; 58 | int frameFinished; 59 | int numBytes; 60 | uint8_t *buffer; 61 | static struct SwsContext *img_convert_ctx; 62 | 63 | if(argc < 2) { 64 | printf("Please provide a movie file\n"); 65 | return -1; 66 | } 67 | // Register all formats and codecs 68 | av_register_all(); 69 | 70 | 71 | /// Open video file 72 | //if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0) // Deprecated 73 | if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) 74 | return -1; // Couldn't open file 75 | 76 | /// Retrieve stream information 77 | //if(av_find_stream_info(pFormatCtx)<0) // Deprecated 78 | if(avformat_find_stream_info(pFormatCtx, NULL) < 0) 79 | return -1; // Couldn't find stream information 80 | 81 | 82 | /// Dump information about file onto standard error 83 | av_dump_format(pFormatCtx, 0, argv[1], 0); 84 | 85 | 86 | /// Find the first video stream 87 | videoStreamIdx=-1; 88 | for(i=0; inb_streams; i++) 89 | if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { //CODEC_TYPE_VIDEO 90 | videoStreamIdx=i; 91 | break; 92 | } 93 | if(videoStreamIdx==-1) 94 | return -1; // Didn't find a video stream 95 | 96 | 97 | /// Get a pointer 
to the codec context for the video stream 98 | pCodecCtx = pFormatCtx->streams[videoStreamIdx]->codec; 99 | 100 | 101 | /// Find the decoder for the video stream 102 | pCodec = avcodec_find_decoder( pCodecCtx->codec_id); 103 | if(pCodec==NULL) { 104 | fprintf(stderr, "Unsupported codec!\n"); 105 | return -1; // Codec not found 106 | } 107 | /// Open codec 108 | //if( avcodec_open(pCodecCtx, pCodec) < 0 ) -- Deprecated 109 | if( avcodec_open2(pCodecCtx, pCodec, NULL) < 0 ) 110 | return -1; // Could not open codec 111 | 112 | /// Allocate video frame 113 | pFrame = avcodec_alloc_frame(); 114 | 115 | 116 | /// Allocate an AVFrame structure 117 | pFrameRGB = avcodec_alloc_frame(); 118 | if(pFrameRGB==NULL) 119 | return -1; 120 | 121 | 122 | /// Determine required buffer size and allocate buffer 123 | numBytes = avpicture_get_size(PIX_FMT_RGB24, 124 | pCodecCtx->width, 125 | pCodecCtx->height); 126 | 127 | buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t)); 128 | 129 | 130 | /// Assign appropriate parts of buffer to image planes in pFrameRGB 131 | // Note that pFrameRGB is an AVFrame, but AVFrame is a superset 132 | // of AVPicture 133 | avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, 134 | pCodecCtx->width, pCodecCtx->height); 135 | 136 | 137 | int w = pCodecCtx->width; 138 | int h = pCodecCtx->height; 139 | img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt, 140 | w, h, PIX_FMT_RGB24, 141 | SWS_BICUBIC, NULL, NULL, NULL); 142 | // Read frames and save first five frames to disk 143 | i=0; 144 | while((av_read_frame(pFormatCtx, &packet)>=0) && (i<5)) { 145 | // Is this a packet from the video stream? 146 | if(packet.stream_index==videoStreamIdx) { 147 | 148 | /// Decode video frame 149 | //avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,packet.data, packet.size); 150 | avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); 151 | 152 | // Did we get a video frame? 
153 | if(frameFinished) { 154 | i++; 155 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 156 | pFrame->linesize, 0, pCodecCtx->height, 157 | pFrameRGB->data, pFrameRGB->linesize); 158 | SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i); 159 | } 160 | } 161 | 162 | // Free the packet that was allocated by av_read_frame 163 | av_free_packet(&packet); 164 | } 165 | 166 | 167 | // Free the RGB image 168 | av_free(buffer); 169 | av_free(pFrameRGB); 170 | 171 | // Free the YUV frame 172 | av_free(pFrame); 173 | 174 | // Close the codec 175 | avcodec_close(pCodecCtx); 176 | 177 | // Close the video file 178 | avformat_close_input(&pFormatCtx); 179 | 180 | //*/ 181 | return 0; 182 | } 183 | -------------------------------------------------------------------------------- /tutorial02.c: -------------------------------------------------------------------------------- 1 | // tutorial02.c 2 | // A pedagogical video player that will stream through every video frame as fast as it can. 3 | // 4 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 5 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 6 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 7 | // Use 8 | // 9 | // gcc -o tutorial02 tutorial02.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs` 10 | // to build (assuming libavformat and libavcodec are correctly installed, 11 | // and assuming you have sdl-config. Please refer to SDL docs for your installation.) 12 | // 13 | // Run using 14 | // tutorial02 myvideofile.mpg 15 | // 16 | // to play the video stream on your screen. 
17 | 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #include 24 | #include 25 | 26 | #ifdef __MINGW32__ 27 | #undef main /* Prevents SDL from overriding main() */ 28 | #endif 29 | 30 | #include 31 | 32 | int main(int argc, char *argv[]) { 33 | AVFormatContext *pFormatCtx; 34 | int i, videoStream; 35 | AVCodecContext *pCodecCtx; 36 | AVCodec *pCodec; 37 | AVFrame *pFrame; 38 | AVPacket packet; 39 | int frameFinished; 40 | static struct SwsContext *img_convert_ctx; 41 | 42 | /// SDL variables 43 | SDL_Overlay *bmp; 44 | SDL_Surface *screen; 45 | SDL_Rect rect; 46 | SDL_Event event; 47 | 48 | if(argc < 2) { 49 | fprintf(stderr, "Usage: test \n"); 50 | exit(1); 51 | } 52 | // Register all formats and codecs 53 | av_register_all(); 54 | 55 | 56 | /// Init SDL 57 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 58 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 59 | exit(1); 60 | } 61 | 62 | // Open video file 63 | if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) 64 | return -1; // Couldn't open file 65 | 66 | // Retrieve stream information 67 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 68 | return -1; // Couldn't find stream information 69 | 70 | // Dump information about file onto standard error 71 | av_dump_format(pFormatCtx, 0, argv[1], 0); 72 | 73 | // Find the first video stream 74 | videoStream=-1; 75 | for(i=0; inb_streams; i++) 76 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { 77 | videoStream=i; 78 | break; 79 | } 80 | if(videoStream==-1) 81 | return -1; // Didn't find a video stream 82 | 83 | // Get a pointer to the codec context for the video stream 84 | pCodecCtx=pFormatCtx->streams[videoStream]->codec; 85 | 86 | // Find the decoder for the video stream 87 | pCodec=avcodec_find_decoder(pCodecCtx->codec_id); 88 | if(pCodec==NULL) { 89 | fprintf(stderr, "Unsupported codec!\n"); 90 | return -1; // Codec not found 91 | } 92 | 93 | // Open codec 94 | 
if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) 95 | return -1; // Could not open codec 96 | 97 | // Allocate video frame 98 | pFrame=avcodec_alloc_frame(); 99 | 100 | /// Make a screen to put our video 101 | #ifndef __DARWIN__ 102 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); 103 | #else 104 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); 105 | #endif 106 | if(!screen) { 107 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 108 | exit(1); 109 | } 110 | 111 | /// Allocate a place to put our YUV image on that screen 112 | bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 113 | pCodecCtx->height, 114 | SDL_YV12_OVERLAY, 115 | screen); 116 | 117 | 118 | int w = pCodecCtx->width; 119 | int h = pCodecCtx->height; 120 | img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt, 121 | w, h, PIX_FMT_YUV420P, 122 | SWS_BICUBIC, NULL, NULL, NULL); 123 | i=0; 124 | while(av_read_frame(pFormatCtx, &packet)>=0) { 125 | // Is this a packet from the video stream? 126 | if(packet.stream_index==videoStream) { 127 | // Decode video frame 128 | //avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size); 129 | avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); 130 | 131 | // Did we get a video frame? 
132 | if(frameFinished) { 133 | SDL_LockYUVOverlay(bmp); 134 | 135 | AVPicture pict; 136 | pict.data[0] = bmp->pixels[0]; 137 | pict.data[1] = bmp->pixels[2]; 138 | pict.data[2] = bmp->pixels[1]; 139 | 140 | pict.linesize[0] = bmp->pitches[0]; 141 | pict.linesize[1] = bmp->pitches[2]; 142 | pict.linesize[2] = bmp->pitches[1]; 143 | 144 | // Convert the image into YUV format that SDL uses 145 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 146 | pFrame->linesize, 0, pCodecCtx->height, 147 | pict.data, pict.linesize); 148 | 149 | SDL_UnlockYUVOverlay(bmp); 150 | 151 | rect.x = 0; 152 | rect.y = 0; 153 | rect.w = pCodecCtx->width; 154 | rect.h = pCodecCtx->height; 155 | SDL_DisplayYUVOverlay(bmp, &rect); 156 | 157 | } 158 | } 159 | 160 | // Free the packet that was allocated by av_read_frame 161 | av_free_packet(&packet); 162 | SDL_PollEvent(&event); 163 | switch(event.type) { 164 | case SDL_QUIT: 165 | SDL_Quit(); 166 | exit(0); 167 | break; 168 | default: 169 | break; 170 | } 171 | 172 | } 173 | 174 | // Free the YUV frame 175 | av_free(pFrame); 176 | 177 | // Close the codec 178 | avcodec_close(pCodecCtx); 179 | 180 | // Close the video file 181 | avformat_close_input(&pFormatCtx); 182 | 183 | printf("Finished tutorial 02"); 184 | return 0; 185 | } 186 | -------------------------------------------------------------------------------- /tutorial03.c: -------------------------------------------------------------------------------- 1 | // tutorial03.c 2 | // A pedagogical video player that will stream through every video frame as fast as it can 3 | // and play audio (out of sync). 
4 | // 5 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 6 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 7 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 8 | // Use 9 | // 10 | // gcc -o tutorial03 tutorial03.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs` 11 | // to build (assuming libavformat and libavcodec are correctly installed, 12 | // and assuming you have sdl-config. Please refer to SDL docs for your installation.) 13 | // 14 | // Run using 15 | // tutorial03 myvideofile.mpg 16 | // 17 | // to play the stream on your screen. 18 | 19 | 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | #include 26 | 27 | #ifdef __MINGW32__ 28 | #undef main /* Prevents SDL from overriding main() */ 29 | #endif 30 | 31 | #include 32 | 33 | #define SDL_AUDIO_BUFFER_SIZE 1024 34 | 35 | 36 | #pragma mark - Packet Queue Impelementation (for Audio stream) 37 | typedef struct PacketQueue { 38 | AVPacketList *first_pkt, *last_pkt; 39 | int nb_packets; 40 | int size; 41 | SDL_mutex *mutex; 42 | SDL_cond *cond; 43 | } PacketQueue; 44 | 45 | PacketQueue audioq; 46 | 47 | int quit = 0; 48 | 49 | void packet_queue_init(PacketQueue *q) { 50 | memset(q, 0, sizeof(PacketQueue)); 51 | q->mutex = SDL_CreateMutex(); 52 | q->cond = SDL_CreateCond(); 53 | } 54 | 55 | /** 56 | SDL_LockMutex() locks the mutex in the queue so we can add something to it, 57 | and then SDL_CondSignal() sends a signal to our get function (if it is 58 | waiting) through our condition variable to tell it that there is data and 59 | it can proceed, then unlocks the mutex to let it go. 
60 | */ 61 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 62 | 63 | AVPacketList *pkt1; 64 | if(av_dup_packet(pkt) < 0) { 65 | return -1; 66 | } 67 | pkt1 = av_malloc(sizeof(AVPacketList)); 68 | if (!pkt1) 69 | return -1; 70 | pkt1->pkt = *pkt; 71 | pkt1->next = NULL; 72 | 73 | 74 | SDL_LockMutex(q->mutex); 75 | 76 | if (!q->last_pkt) 77 | q->first_pkt = pkt1; 78 | else 79 | q->last_pkt->next = pkt1; 80 | q->last_pkt = pkt1; 81 | q->nb_packets++; 82 | q->size += pkt1->pkt.size; 83 | SDL_CondSignal(q->cond); 84 | 85 | SDL_UnlockMutex(q->mutex); 86 | return 0; 87 | } 88 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 89 | { 90 | AVPacketList *pkt1; 91 | int ret; 92 | 93 | SDL_LockMutex(q->mutex); 94 | 95 | for(;;) { 96 | 97 | if(quit) { 98 | ret = -1; 99 | break; 100 | } 101 | 102 | pkt1 = q->first_pkt; 103 | if (pkt1) { 104 | q->first_pkt = pkt1->next; 105 | if (!q->first_pkt) 106 | q->last_pkt = NULL; 107 | q->nb_packets--; 108 | q->size -= pkt1->pkt.size; 109 | *pkt = pkt1->pkt; 110 | av_free(pkt1); 111 | ret = 1; 112 | break; 113 | } else if (!block) { 114 | ret = 0; 115 | break; 116 | } else { 117 | SDL_CondWait(q->cond, q->mutex); 118 | } 119 | } 120 | SDL_UnlockMutex(q->mutex); 121 | return ret; 122 | } 123 | 124 | 125 | #pragma mark - Audio decoding functions 126 | int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) { 127 | static AVFrame *decoded_aframe; 128 | static AVPacket pkt, pktTemp; 129 | // static uint8_t *audio_pkt_data = NULL; 130 | // static int audio_pkt_size = 0; 131 | 132 | int len1, data_size; 133 | 134 | for(;;) { 135 | 136 | while(pktTemp.size > 0) 137 | { 138 | int got_frame = 0; 139 | 140 | if (!decoded_aframe) { 141 | if (!(decoded_aframe = avcodec_alloc_frame())) { 142 | fprintf(stderr, "out of memory\n"); 143 | exit(1); 144 | } 145 | } else 146 | avcodec_get_frame_defaults(decoded_aframe); 147 | 148 | //data_size = buf_size; /// ???? 
149 | len1 = avcodec_decode_audio4(aCodecCtx, decoded_aframe, &got_frame, &pktTemp); 150 | 151 | 152 | /// Check if 153 | if (len1 < 0) { 154 | pktTemp.size = 0; 155 | break; // skip packet 156 | } 157 | 158 | 159 | if (got_frame) { 160 | printf("\nGot frame!"); 161 | //printf("\nFrame data size: %d", sizeof(decoded_aframe->data[0])); 162 | data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels, 163 | decoded_aframe->nb_samples, 164 | aCodecCtx->sample_fmt, 1); 165 | if (data_size > buf_size) { 166 | data_size = buf_size; 167 | } 168 | memcpy(audio_buf, decoded_aframe->data[0], data_size); 169 | 170 | }else{ 171 | data_size = 0; 172 | } 173 | 174 | printf("\nData size %d", data_size); 175 | pktTemp.data += len1; 176 | pktTemp.size -= len1; 177 | 178 | if (data_size <= 0) { 179 | continue; 180 | } 181 | 182 | return data_size; 183 | /* Deprecated 184 | data_size = buf_size; 185 | len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, 186 | audio_pkt_data, audio_pkt_size); 187 | if(len1 < 0) { 188 | // if error, skip frame 189 | audio_pkt_size = 0; 190 | break; 191 | } 192 | audio_pkt_data += len1; 193 | audio_pkt_size -= len1; 194 | if(data_size <= 0) { 195 | // No data yet, get more frames 196 | continue; 197 | } 198 | // We have data, return it and come back for more later 199 | return data_size; 200 | */ 201 | } 202 | 203 | 204 | if(pkt.data) 205 | av_free_packet(&pkt); 206 | 207 | if(quit) 208 | { 209 | return -1; 210 | } 211 | 212 | 213 | /// Get packet from queue 214 | if(packet_queue_get(&audioq, &pkt, 1) < 0) 215 | { 216 | return -1; 217 | } 218 | 219 | 220 | av_init_packet(&pktTemp); 221 | 222 | pktTemp.data = pkt.data; 223 | pktTemp.size = pkt.size; 224 | } 225 | } 226 | 227 | /** 228 | This call back function is called from thread that spawned by SDL_openAudio() 229 | userdata: pointer to audio codec context 230 | stream: buffer that we need to write data to 231 | len: size of the the buffer 232 | */ 233 | void 
audio_callback(void *userdata, Uint8 *stream, int len) { 234 | 235 | AVCodecContext *aCodecCtx = (AVCodecContext *)userdata; 236 | int len1, audio_size; 237 | 238 | static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]; 239 | static unsigned int audio_buf_size = 0; 240 | static unsigned int audio_buf_index = 0; 241 | 242 | while(len > 0) { 243 | /// Need to read more audio data 244 | if(audio_buf_index >= audio_buf_size) { 245 | /* We have already sent all our data; get more */ 246 | audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf)); 247 | if(audio_size < 0) { 248 | /* If error, output silence */ 249 | audio_buf_size = 1024; // arbitrary? 250 | memset(audio_buf, 0, audio_buf_size); 251 | } else { 252 | audio_buf_size = audio_size; 253 | } 254 | audio_buf_index = 0; 255 | } 256 | 257 | len1 = audio_buf_size - audio_buf_index; 258 | if(len1 > len) 259 | len1 = len; 260 | memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1); 261 | len -= len1; 262 | stream += len1; 263 | audio_buf_index += len1; 264 | } 265 | } 266 | 267 | 268 | #pragma mark - Main function 269 | 270 | int main(int argc, char *argv[]) { 271 | AVFormatContext *pFormatCtx; 272 | int i, videoStream, audioStream; 273 | AVCodecContext *pCodecCtx; 274 | AVCodec *pCodec; 275 | AVFrame *pFrame; 276 | AVPacket packet; 277 | int frameFinished; 278 | static struct SwsContext *img_convert_ctx; 279 | 280 | AVCodecContext *aCodecCtx; 281 | AVCodec *aCodec; 282 | 283 | SDL_Overlay *bmp; 284 | SDL_Surface *screen; 285 | SDL_Rect rect; 286 | SDL_Event event; 287 | SDL_AudioSpec wanted_spec, spec; 288 | 289 | if(argc < 2) { 290 | fprintf(stderr, "Usage: test \n"); 291 | exit(1); 292 | } 293 | // Register all formats and codecs 294 | av_register_all(); 295 | 296 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 297 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 298 | exit(1); 299 | } 300 | 301 | // Open video file 302 | 
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) 303 | return -1; // Couldn't open file 304 | 305 | // Retrieve stream information 306 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 307 | return -1; // Couldn't find stream information 308 | 309 | // Dump information about file onto standard error 310 | av_dump_format(pFormatCtx, 0, argv[1], 0); 311 | 312 | /// Find the first video and audio stream 313 | videoStream=-1; 314 | audioStream=-1; 315 | for(i=0; inb_streams; i++) { 316 | if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 317 | videoStream < 0) { 318 | videoStream=i; 319 | } 320 | if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && 321 | audioStream < 0) { 322 | audioStream=i; 323 | } 324 | } 325 | if(videoStream==-1) 326 | return -1; // Didn't find a video stream 327 | if(audioStream==-1) 328 | return -1; 329 | 330 | aCodecCtx=pFormatCtx->streams[audioStream]->codec; 331 | 332 | /// Set audio settings from codec info 333 | wanted_spec.freq = aCodecCtx->sample_rate; 334 | wanted_spec.format = AUDIO_S16SYS; 335 | wanted_spec.channels = aCodecCtx->channels; 336 | wanted_spec.silence = 0; 337 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 338 | wanted_spec.callback = audio_callback; ///<----- Callback to feed audio data 339 | wanted_spec.userdata = aCodecCtx; 340 | 341 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 342 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 343 | return -1; 344 | } 345 | 346 | /// Find audio codec 347 | aCodec = avcodec_find_decoder(aCodecCtx->codec_id); 348 | if(!aCodec) { 349 | fprintf(stderr, "Unsupported codec!\n"); 350 | return -1; 351 | } 352 | 353 | /// Open codec 354 | if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0) 355 | return -1; 356 | 357 | // audio_st = pFormatCtx->streams[index] 358 | packet_queue_init(&audioq); 359 | SDL_PauseAudio(0); 360 | 361 | // Get a pointer to the codec context for the video stream 362 | 
pCodecCtx=pFormatCtx->streams[videoStream]->codec; 363 | 364 | // Find the decoder for the video stream 365 | pCodec=avcodec_find_decoder(pCodecCtx->codec_id); 366 | if(pCodec==NULL) { 367 | fprintf(stderr, "Unsupported codec!\n"); 368 | return -1; // Codec not found 369 | } 370 | // Open codec 371 | if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) 372 | return -1; // Could not open codec 373 | 374 | // Allocate video frame 375 | pFrame=avcodec_alloc_frame(); 376 | 377 | // Make a screen to put our video 378 | 379 | #ifndef __DARWIN__ 380 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); 381 | #else 382 | screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); 383 | #endif 384 | if(!screen) { 385 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 386 | exit(1); 387 | } 388 | 389 | // Allocate a place to put our YUV image on that screen 390 | bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 391 | pCodecCtx->height, 392 | SDL_YV12_OVERLAY, 393 | screen); 394 | 395 | 396 | int w = pCodecCtx->width; 397 | int h = pCodecCtx->height; 398 | img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt, 399 | w, h, PIX_FMT_YUV420P, 400 | SWS_BICUBIC, NULL, NULL, NULL); 401 | 402 | // Read frames and save first five frames to disk 403 | i=0; 404 | while(av_read_frame(pFormatCtx, &packet)>=0) { 405 | /// Is this a packet from the video stream? 406 | if(packet.stream_index==videoStream) { 407 | // Decode video frame 408 | avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); 409 | 410 | // Did we get a video frame? 
411 | if(frameFinished) { 412 | SDL_LockYUVOverlay(bmp); 413 | 414 | AVPicture pict; 415 | pict.data[0] = bmp->pixels[0]; 416 | pict.data[1] = bmp->pixels[2]; 417 | pict.data[2] = bmp->pixels[1]; 418 | 419 | pict.linesize[0] = bmp->pitches[0]; 420 | pict.linesize[1] = bmp->pitches[2]; 421 | pict.linesize[2] = bmp->pitches[1]; 422 | 423 | // Convert the image into YUV format that SDL uses 424 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 425 | pFrame->linesize, 0, pCodecCtx->height, 426 | pict.data, pict.linesize); 427 | 428 | SDL_UnlockYUVOverlay(bmp); 429 | 430 | rect.x = 0; 431 | rect.y = 0; 432 | rect.w = pCodecCtx->width; 433 | rect.h = pCodecCtx->height; 434 | SDL_DisplayYUVOverlay(bmp, &rect); 435 | av_free_packet(&packet); 436 | } 437 | /// Is this a packet from audio streams? 438 | } else if(packet.stream_index == audioStream) { 439 | packet_queue_put(&audioq, &packet); 440 | } else { 441 | av_free_packet(&packet); 442 | } 443 | 444 | /// Free the packet that was allocated by av_read_frame 445 | SDL_PollEvent(&event); 446 | 447 | switch(event.type) { 448 | case SDL_QUIT: 449 | quit = 1; 450 | SDL_Quit(); 451 | exit(0); 452 | break; 453 | default: 454 | break; 455 | } 456 | 457 | } 458 | 459 | // Free the YUV frame 460 | av_free(pFrame); 461 | 462 | // Close the codec 463 | avcodec_close(pCodecCtx); 464 | 465 | // Close the video file 466 | avformat_close_input(&pFormatCtx); 467 | 468 | return 0; 469 | } 470 | -------------------------------------------------------------------------------- /tutorial04.c: -------------------------------------------------------------------------------- 1 | // tutorial04.c 2 | // A pedagogical video player that will stream through every video frame as fast as it can, 3 | // and play audio (out of sync). 
4 | // 5 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 6 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 7 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 8 | // Use 9 | // 10 | // gcc -o tutorial04 tutorial04.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs` 11 | // to build (assuming libavformat and libavcodec are correctly installed, 12 | // and assuming you have sdl-config. Please refer to SDL docs for your installation.) 13 | // 14 | // Run using 15 | // tutorial04 myvideofile.mpg 16 | // 17 | // to play the video stream on your screen. 18 | 19 | 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | #include 28 | #include 29 | 30 | #ifdef __MINGW32__ 31 | #undef main /* Prevents SDL from overriding main() */ 32 | #endif 33 | 34 | #include 35 | #include 36 | 37 | #define SDL_AUDIO_BUFFER_SIZE 1024 38 | 39 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 40 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 41 | 42 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 43 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 44 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 45 | 46 | #define VIDEO_PICTURE_QUEUE_SIZE 1 47 | 48 | typedef struct PacketQueue { 49 | AVPacketList *first_pkt, *last_pkt; 50 | int nb_packets; 51 | int size; 52 | SDL_mutex *mutex; 53 | SDL_cond *cond; 54 | } PacketQueue; 55 | 56 | 57 | typedef struct VideoPicture { 58 | SDL_Overlay *bmp; 59 | int width, height; /* source height & width */ 60 | int allocated; 61 | } VideoPicture; 62 | 63 | typedef struct VideoState { 64 | 65 | AVFormatContext *pFormatCtx; 66 | int videoStreamIdx, 67 | audioStreamIdx; 68 | 69 | /// Audio related 70 | AVStream *audio_st; 71 | PacketQueue audioq; 72 | uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]; 73 | unsigned int audio_buf_size; 74 | unsigned int audio_buf_index; 75 | AVPacket audio_pkt; 76 | uint8_t *audio_pkt_data; 77 | int audio_pkt_size; 78 | 79 | /// Video related 80 | AVStream 
*video_st; 81 | PacketQueue videoq; 82 | 83 | VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; 84 | int pictq_size, 85 | pictq_rindex, 86 | pictq_windex; 87 | 88 | /// Thread communication related 89 | SDL_mutex *pictq_mutex; 90 | SDL_cond *pictq_cond; 91 | 92 | SDL_Thread *parse_tid; 93 | SDL_Thread *video_tid; 94 | 95 | char filename[1024]; 96 | int quit; 97 | } VideoState; 98 | 99 | SDL_Surface *screen; 100 | 101 | /* Since we only have one decoding thread, the Big Struct 102 | can be global in case we need it. */ 103 | VideoState *global_video_state; 104 | ///------------------------------------------------------------------------------------- 105 | 106 | void packet_queue_init(PacketQueue *q) { 107 | memset(q, 0, sizeof(PacketQueue)); 108 | q->mutex = SDL_CreateMutex(); 109 | q->cond = SDL_CreateCond(); 110 | } 111 | ///------------------------------------------------------------------------------------- 112 | 113 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 114 | 115 | AVPacketList *pkt1; 116 | if(av_dup_packet(pkt) < 0) { 117 | return -1; 118 | } 119 | pkt1 = av_malloc(sizeof(AVPacketList)); 120 | if (!pkt1) 121 | return -1; 122 | pkt1->pkt = *pkt; 123 | pkt1->next = NULL; 124 | 125 | SDL_LockMutex(q->mutex); 126 | 127 | if (!q->last_pkt) 128 | q->first_pkt = pkt1; 129 | else 130 | q->last_pkt->next = pkt1; 131 | q->last_pkt = pkt1; 132 | q->nb_packets++; 133 | q->size += pkt1->pkt.size; 134 | SDL_CondSignal(q->cond); 135 | 136 | SDL_UnlockMutex(q->mutex); 137 | return 0; 138 | } 139 | ///------------------------------------------------------------------------------------- 140 | 141 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 142 | { 143 | AVPacketList *pkt1; 144 | int ret; 145 | 146 | SDL_LockMutex(q->mutex); 147 | 148 | for(;;) { 149 | 150 | if(global_video_state->quit) { 151 | ret = -1; 152 | break; 153 | } 154 | 155 | pkt1 = q->first_pkt; 156 | if (pkt1) { 157 | q->first_pkt = pkt1->next; 158 | if (!q->first_pkt) 159 | 
q->last_pkt = NULL; 160 | q->nb_packets--; 161 | q->size -= pkt1->pkt.size; 162 | *pkt = pkt1->pkt; 163 | av_free(pkt1); 164 | ret = 1; 165 | break; 166 | } else if (!block) { 167 | ret = 0; 168 | break; 169 | } else { 170 | SDL_CondWait(q->cond, q->mutex); 171 | } 172 | } 173 | SDL_UnlockMutex(q->mutex); 174 | return ret; 175 | } 176 | ///------------------------------------------------------------------------------------- 177 | int audio_decode_frame(VideoState *vidState, uint8_t *audio_buf, int buf_size) 178 | //int audio_decode_frame(VideoState *vidState) 179 | { 180 | static AVFrame *decoded_aframe; 181 | int len1, data_size; 182 | AVPacket *pkt = &vidState->audio_pkt; 183 | static AVPacket pktTemp; 184 | 185 | for(;;) { 186 | while(vidState->audio_pkt_size > 0) { 187 | int got_frame = 0; 188 | if (!decoded_aframe) { 189 | if (!(decoded_aframe = avcodec_alloc_frame())) { 190 | fprintf(stderr, "out of memory\n"); 191 | exit(1); 192 | } 193 | } else 194 | avcodec_get_frame_defaults(decoded_aframe); 195 | 196 | data_size = buf_size; 197 | // len1 = avcodec_decode_audio2(is->audio_st->codec, 198 | // (int16_t *)audio_buf, &data_size, 199 | // is->audio_pkt_data, is->audio_pkt_size); 200 | len1 = avcodec_decode_audio4(vidState->audio_st->codec, decoded_aframe, &got_frame, &pktTemp); 201 | 202 | if(len1 < 0) { 203 | /* if error, skip frame */ 204 | vidState->audio_pkt_size = 0; 205 | break; 206 | } 207 | 208 | 209 | if (got_frame) { 210 | //printf("\nGot frame!"); 211 | //printf("\nFrame data size: %d", sizeof(decoded_aframe->data[0])); 212 | data_size = av_samples_get_buffer_size(NULL, vidState->audio_st->codec->channels, 213 | decoded_aframe->nb_samples, 214 | vidState->audio_st->codec->sample_fmt, 1); 215 | if (data_size > buf_size) { 216 | data_size = buf_size; 217 | } 218 | memcpy(audio_buf, decoded_aframe->data[0], data_size); 219 | 220 | }else{ 221 | data_size = 0; 222 | } 223 | pktTemp.data += len1; 224 | pktTemp.size -= len1; 225 | vidState->audio_pkt_data 
+= len1; 226 | vidState->audio_pkt_size -= len1; 227 | if(data_size <= 0) { 228 | /* No data yet, get more frames */ 229 | continue; 230 | } 231 | /* We have data, return it and come back for more later */ 232 | return data_size; 233 | } 234 | if(pkt->data) 235 | av_free_packet(pkt); 236 | 237 | if(vidState->quit) { 238 | return -1; 239 | } 240 | /* next packet */ 241 | if(packet_queue_get(&vidState->audioq, pkt, 1) < 0) { 242 | return -1; 243 | } 244 | pktTemp.data = pkt->data; 245 | pktTemp.size = pkt->size; 246 | vidState->audio_pkt_data = pkt->data; 247 | vidState->audio_pkt_size = pkt->size; 248 | } 249 | } 250 | ///------------------------------------------------------------------------------------- 251 | void audio_callback(void *userdata, Uint8 *stream, int len) { 252 | 253 | VideoState *vidState = (VideoState *)userdata; 254 | int len1, audio_size; 255 | 256 | while(len > 0) { 257 | if(vidState->audio_buf_index >= vidState->audio_buf_size) { 258 | /// We have already sent all our data; get more 259 | //printf("\nSize of audio buffer: %ld", sizeof(vidState->audio_buf)); 260 | audio_size = audio_decode_frame(vidState, vidState->audio_buf, sizeof(vidState->audio_buf)); 261 | // audio_size = audio_decode_frame(vidState); 262 | if(audio_size < 0) { 263 | /* If error, output silence */ 264 | vidState->audio_buf_size = 1024; 265 | memset(vidState->audio_buf, 0, vidState->audio_buf_size); 266 | } else { 267 | vidState->audio_buf_size = audio_size; 268 | } 269 | vidState->audio_buf_index = 0; 270 | } 271 | len1 = vidState->audio_buf_size - vidState->audio_buf_index; 272 | if(len1 > len) 273 | len1 = len; 274 | memcpy(stream, (uint8_t *)vidState->audio_buf + vidState->audio_buf_index, len1); 275 | len -= len1; 276 | stream += len1; 277 | vidState->audio_buf_index += len1; 278 | } 279 | } 280 | ///------------------------------------------------------------------------------------- 281 | 282 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 283 | 
SDL_Event event; 284 | event.type = FF_REFRESH_EVENT; 285 | event.user.data1 = opaque; 286 | SDL_PushEvent(&event); 287 | return 0; /* 0 means stop timer */ 288 | } 289 | ///------------------------------------------------------------------------------------- 290 | 291 | /* schedule a video refresh in 'delay' ms */ 292 | static void schedule_refresh(VideoState *is, int delay) { 293 | SDL_AddTimer(delay, sdl_refresh_timer_cb, is); 294 | } 295 | ///------------------------------------------------------------------------------------- 296 | 297 | void video_display(VideoState *vidState) { 298 | 299 | SDL_Rect rect; 300 | VideoPicture *vp; 301 | AVPicture pict; 302 | float aspect_ratio; 303 | int w, h, x, y; 304 | int i; 305 | 306 | vp = &vidState->pictq[vidState->pictq_rindex]; 307 | if(vp->bmp) { 308 | if(vidState->video_st->codec->sample_aspect_ratio.num == 0) { 309 | aspect_ratio = 0; 310 | } else { 311 | aspect_ratio = av_q2d(vidState->video_st->codec->sample_aspect_ratio) * 312 | vidState->video_st->codec->width / vidState->video_st->codec->height; 313 | } 314 | if(aspect_ratio <= 0.0) { 315 | aspect_ratio = (float)vidState->video_st->codec->width / 316 | (float)vidState->video_st->codec->height; 317 | } 318 | h = screen->h; 319 | w = ((int)rint(h * aspect_ratio)) & -3; 320 | if(w > screen->w) { 321 | w = screen->w; 322 | h = ((int)rint(w / aspect_ratio)) & -3; 323 | } 324 | x = (screen->w - w) / 2; 325 | y = (screen->h - h) / 2; 326 | 327 | rect.x = x; 328 | rect.y = y; 329 | rect.w = w; 330 | rect.h = h; 331 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 332 | } 333 | } 334 | ///------------------------------------------------------------------------------------- 335 | 336 | void video_refresh_timer(void *userdata) { 337 | 338 | VideoState *vidState = (VideoState *)userdata; 339 | VideoPicture *vp; 340 | 341 | if(vidState->video_st) { 342 | if(vidState->pictq_size == 0) { 343 | schedule_refresh(vidState, 1); 344 | } else { 345 | vp = 
&vidState->pictq[vidState->pictq_rindex]; 346 | 347 | /** Now, normally here goes a ton of code 348 | about timing, etc. we're just going to 349 | guess at a delay for now. You can 350 | increase and decrease this value and hard code 351 | the timing - but I don't suggest that ;) 352 | We'll learn how to do it for real later. 353 | */ 354 | /// Next video frame will be show after 80 ms 355 | schedule_refresh(vidState, 38); 356 | 357 | /// show the picture! 358 | video_display(vidState); 359 | 360 | /// update queue for next picture! 361 | if(++vidState->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 362 | vidState->pictq_rindex = 0; 363 | } 364 | SDL_LockMutex(vidState->pictq_mutex); 365 | vidState->pictq_size--; 366 | SDL_CondSignal(vidState->pictq_cond); 367 | SDL_UnlockMutex(vidState->pictq_mutex); 368 | } 369 | } else { 370 | schedule_refresh(vidState, 100); 371 | } 372 | } 373 | ///------------------------------------------------------------------------------------- 374 | 375 | void alloc_picture(void *userdata) { 376 | 377 | VideoState *is = (VideoState *)userdata; 378 | VideoPicture *vp; 379 | 380 | vp = &is->pictq[is->pictq_windex]; 381 | if(vp->bmp) { 382 | // we already have one make another, bigger/smaller 383 | SDL_FreeYUVOverlay(vp->bmp); 384 | } 385 | // Allocate a place to put our YUV image on that screen 386 | vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, 387 | is->video_st->codec->height, 388 | SDL_YV12_OVERLAY, 389 | screen); 390 | vp->width = is->video_st->codec->width; 391 | vp->height = is->video_st->codec->height; 392 | 393 | SDL_LockMutex(is->pictq_mutex); 394 | vp->allocated = 1; 395 | SDL_CondSignal(is->pictq_cond); 396 | SDL_UnlockMutex(is->pictq_mutex); 397 | 398 | } 399 | ///------------------------------------------------------------------------------------- 400 | 401 | int queue_picture(VideoState *vidState, AVFrame *pFrame) { 402 | static struct SwsContext *img_convert_ctx; 403 | VideoPicture *vp; 404 | int dst_pix_fmt; 405 
| AVPicture pict; 406 | 407 | /// wait until we have space for a new pic 408 | SDL_LockMutex(vidState->pictq_mutex); 409 | while(vidState->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 410 | !vidState->quit) { 411 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 412 | } 413 | SDL_UnlockMutex(vidState->pictq_mutex); 414 | 415 | if(vidState->quit) 416 | return -1; 417 | 418 | // windex is set to 0 initially 419 | vp = &vidState->pictq[vidState->pictq_windex]; 420 | 421 | /// allocate or resize the buffer! 422 | if(!vp->bmp || 423 | vp->width != vidState->video_st->codec->width || 424 | vp->height != vidState->video_st->codec->height) { 425 | SDL_Event event; 426 | 427 | vp->allocated = 0; 428 | 429 | /// we have to do it in the main thread 430 | event.type = FF_ALLOC_EVENT; 431 | event.user.data1 = vidState; 432 | SDL_PushEvent(&event); 433 | 434 | /// wait until we have a picture allocated 435 | SDL_LockMutex(vidState->pictq_mutex); 436 | while(!vp->allocated && !vidState->quit) { 437 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 438 | } 439 | SDL_UnlockMutex(vidState->pictq_mutex); 440 | if(vidState->quit) { 441 | return -1; 442 | } 443 | } 444 | 445 | /// We have a place to put our picture on the queue 446 | 447 | if(vp->bmp) { 448 | 449 | SDL_LockYUVOverlay(vp->bmp); 450 | 451 | dst_pix_fmt = PIX_FMT_YUV420P; 452 | /* point pict at the queue */ 453 | 454 | pict.data[0] = vp->bmp->pixels[0]; 455 | pict.data[1] = vp->bmp->pixels[2]; 456 | pict.data[2] = vp->bmp->pixels[1]; 457 | 458 | pict.linesize[0] = vp->bmp->pitches[0]; 459 | pict.linesize[1] = vp->bmp->pitches[2]; 460 | pict.linesize[2] = vp->bmp->pitches[1]; 461 | 462 | // Convert the image into YUV format that SDL uses 463 | // img_convert(&pict, dst_pix_fmt, 464 | // (AVPicture *)pFrame, is->video_st->codec->pix_fmt, 465 | // is->video_st->codec->width, is->video_st->codec->height); 466 | 467 | int w = vidState->video_st->codec->width; 468 | int h = vidState->video_st->codec->height; 
469 | if (!img_convert_ctx) { 470 | img_convert_ctx = sws_getContext(w, h, vidState->video_st->codec->pix_fmt, 471 | w, h, dst_pix_fmt, 472 | SWS_BICUBIC, NULL, NULL, NULL); 473 | } 474 | 475 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 476 | pFrame->linesize, 0, h, 477 | pict.data, pict.linesize); 478 | 479 | 480 | SDL_UnlockYUVOverlay(vp->bmp); 481 | /* now we inform our display thread that we have a pic ready */ 482 | if(++vidState->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 483 | vidState->pictq_windex = 0; 484 | } 485 | SDL_LockMutex(vidState->pictq_mutex); 486 | vidState->pictq_size++; 487 | SDL_UnlockMutex(vidState->pictq_mutex); 488 | } 489 | return 0; 490 | } 491 | ///------------------------------------------------------------------------------------- 492 | 493 | int video_thread(void *arg) { 494 | VideoState *vidState = (VideoState *)arg; 495 | AVPacket pkt1, *packet = &pkt1; 496 | int len1, frameFinished; 497 | AVFrame *pFrame; 498 | 499 | pFrame = avcodec_alloc_frame(); 500 | 501 | for(;;) { 502 | if(packet_queue_get(&vidState->videoq, packet, 1) < 0) { 503 | // means we quit getting packets 504 | break; 505 | } 506 | /// Decode video frame 507 | // len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished, 508 | // packet->data, packet->size); 509 | avcodec_decode_video2(vidState->video_st->codec, pFrame, &frameFinished, packet); 510 | 511 | /// Put to picture queue if found a frame 512 | if(frameFinished) { 513 | if(queue_picture(vidState, pFrame) < 0) { 514 | break; 515 | } 516 | } 517 | av_free_packet(packet); 518 | } 519 | av_free(pFrame); 520 | return 0; 521 | } 522 | ///------------------------------------------------------------------------------------- 523 | 524 | int stream_component_open(VideoState *vidState, int stream_index) { 525 | 526 | AVFormatContext *pFormatCtx = vidState->pFormatCtx; 527 | AVCodecContext *codecCtx; 528 | AVCodec *codec; 529 | SDL_AudioSpec wanted_spec, spec; 530 | 531 | 
if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 532 | return -1; 533 | } 534 | 535 | // Get a pointer to the codec context for the video stream 536 | codecCtx = pFormatCtx->streams[stream_index]->codec; 537 | 538 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 539 | // Set audio settings from codec info 540 | wanted_spec.freq = codecCtx->sample_rate; 541 | wanted_spec.format = AUDIO_S16SYS; 542 | wanted_spec.channels = codecCtx->channels; 543 | wanted_spec.silence = 0; 544 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 545 | wanted_spec.callback = audio_callback; 546 | wanted_spec.userdata = vidState; 547 | 548 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 549 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 550 | return -1; 551 | } 552 | } 553 | codec = avcodec_find_decoder(codecCtx->codec_id); 554 | if(!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) { 555 | fprintf(stderr, "Unsupported codec!\n"); 556 | return -1; 557 | } 558 | 559 | switch(codecCtx->codec_type) { 560 | case AVMEDIA_TYPE_AUDIO: 561 | vidState->audioStreamIdx = stream_index; 562 | vidState->audio_st = pFormatCtx->streams[stream_index]; 563 | vidState->audio_buf_size = 0; 564 | vidState->audio_buf_index = 0; 565 | memset(&vidState->audio_pkt, 0, sizeof(vidState->audio_pkt)); 566 | packet_queue_init(&vidState->audioq); 567 | SDL_PauseAudio(0); ///unpaused audio callback to start output audio 568 | break; 569 | case AVMEDIA_TYPE_VIDEO: 570 | vidState->videoStreamIdx = stream_index; 571 | vidState->video_st = pFormatCtx->streams[stream_index]; 572 | 573 | packet_queue_init(&vidState->videoq); 574 | vidState->video_tid = SDL_CreateThread(video_thread, vidState); 575 | break; 576 | default: 577 | break; 578 | } 579 | } 580 | ///------------------------------------------------------------------------------------- 581 | 582 | //int decode_interrupt_cb(void) 583 | static int decode_interrupt_cb(void *ctx) 584 | { 585 | return (global_video_state && 
global_video_state->quit); 586 | } 587 | 588 | const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; 589 | 590 | ///------------------------------------------------------------------------------------- 591 | 592 | int decode_thread(void *arg) { 593 | 594 | VideoState *vidState = (VideoState *)arg; 595 | AVFormatContext *pFormatCtx; 596 | AVPacket pkt1, *packet = &pkt1; 597 | 598 | int video_index = -1; 599 | int audio_index = -1; 600 | int i; 601 | 602 | vidState->videoStreamIdx=-1; 603 | vidState->audioStreamIdx=-1; 604 | 605 | global_video_state = vidState; 606 | 607 | // will interrupt blocking functions if we quit! --> Deprecated, usind AVIOInterruptCB instead 608 | // url_set_interrupt_cb(decode_interrupt_cb); 609 | // avio_set_interrupt_cb(decode_interrupt_cb); 610 | 611 | pFormatCtx = avformat_alloc_context(); 612 | pFormatCtx->interrupt_callback = int_cb; 613 | 614 | if (avio_open2(&pFormatCtx->pb, vidState->filename, AVIO_FLAG_READ, &pFormatCtx->interrupt_callback, NULL)) 615 | return -1; 616 | 617 | // Open video file 618 | if (avformat_open_input(&pFormatCtx, vidState->filename, NULL, NULL)!=0) 619 | return -1; // Couldn't open file 620 | 621 | 622 | vidState->pFormatCtx = pFormatCtx; 623 | 624 | // Retrieve stream information 625 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 626 | return -1; // Couldn't find stream information 627 | 628 | // Dump information about file onto standard error 629 | av_dump_format(pFormatCtx, 0, vidState->filename, 0); 630 | 631 | // Find the first video stream 632 | for(i=0; inb_streams; i++) { 633 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 634 | video_index < 0) { 635 | video_index=i; 636 | } 637 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 638 | audio_index < 0) { 639 | audio_index=i; 640 | } 641 | } 642 | 643 | /// Find create threads for each stream 644 | if(audio_index >= 0) { 645 | stream_component_open(vidState, audio_index); 646 | } 647 | 
if(video_index >= 0) { 648 | stream_component_open(vidState, video_index); 649 | } 650 | 651 | if(vidState->videoStreamIdx < 0 || vidState->audioStreamIdx < 0) { 652 | fprintf(stderr, "%s: could not open codecs\n", vidState->filename); 653 | goto fail; 654 | } 655 | 656 | // main decode loop 657 | 658 | for(;;) { 659 | if(vidState->quit) { 660 | break; 661 | } 662 | 663 | 664 | /// If queue full, just wait 665 | if(vidState->audioq.size > MAX_AUDIOQ_SIZE || 666 | vidState->videoq.size > MAX_VIDEOQ_SIZE) { 667 | SDL_Delay(10); 668 | continue; 669 | } 670 | 671 | /// Read frame from file 672 | if(av_read_frame(vidState->pFormatCtx, packet) < 0) { 673 | // if(url_ferror(&pFormatCtx->pb) == 0) { // Deprecated 674 | if(&pFormatCtx->pb && &pFormatCtx->pb->error){ 675 | SDL_Delay(100); /* no error; wait for user input */ 676 | continue; 677 | } else { 678 | break; 679 | } 680 | } 681 | /// Push packet to corresponding queue 682 | if(packet->stream_index == vidState->videoStreamIdx) { 683 | packet_queue_put(&vidState->videoq, packet); 684 | } else if(packet->stream_index == vidState->audioStreamIdx) { 685 | packet_queue_put(&vidState->audioq, packet); 686 | } else { 687 | av_free_packet(packet); 688 | } 689 | } 690 | 691 | /// all done - wait for quit signal 692 | while(!vidState->quit) { 693 | SDL_Delay(100); 694 | } 695 | 696 | fail: 697 | if(1){ 698 | SDL_Event event; 699 | event.type = FF_QUIT_EVENT; 700 | event.user.data1 = vidState; 701 | SDL_PushEvent(&event); 702 | } 703 | return 0; 704 | } 705 | 706 | 707 | ///------------------------------------------------------------------------------------- 708 | int main(int argc, char *argv[]) { 709 | 710 | SDL_Event event; 711 | 712 | VideoState *vidState; 713 | 714 | vidState = av_mallocz(sizeof(VideoState)); 715 | 716 | if(argc < 2) { 717 | fprintf(stderr, "Usage: test \n"); 718 | exit(1); 719 | } 720 | // Register all formats and codecs 721 | av_register_all(); 722 | 723 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | 
SDL_INIT_TIMER)) { 724 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 725 | exit(1); 726 | } 727 | 728 | // Make a screen to put our video 729 | #ifndef __DARWIN__ 730 | screen = SDL_SetVideoMode(640, 480, 0, 0); 731 | #else 732 | screen = SDL_SetVideoMode(640, 480, 24, 0); 733 | #endif 734 | if(!screen) { 735 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 736 | exit(1); 737 | } 738 | 739 | av_strlcpy(vidState->filename, argv[1], sizeof(vidState->filename)); 740 | 741 | /// Init mutex for video queue 742 | vidState->pictq_mutex = SDL_CreateMutex(); 743 | vidState->pictq_cond = SDL_CreateCond(); 744 | 745 | schedule_refresh(vidState, 40); 746 | 747 | vidState->parse_tid = SDL_CreateThread(decode_thread, vidState); 748 | if(!vidState->parse_tid) { 749 | av_free(vidState); 750 | return -1; 751 | } 752 | for(;;) { 753 | 754 | SDL_WaitEvent(&event); 755 | switch(event.type) { 756 | case FF_QUIT_EVENT: 757 | case SDL_QUIT: 758 | vidState->quit = 1; 759 | SDL_Quit(); 760 | return 0; 761 | break; 762 | case FF_ALLOC_EVENT: 763 | alloc_picture(event.user.data1); 764 | break; 765 | case FF_REFRESH_EVENT: 766 | video_refresh_timer(event.user.data1); 767 | break; 768 | default: 769 | break; 770 | } 771 | } 772 | return 0; 773 | 774 | } 775 | -------------------------------------------------------------------------------- /tutorial05.c: -------------------------------------------------------------------------------- 1 | // tutorial05.c 2 | // A pedagogical video player that really works! 3 | // 4 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 5 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 6 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 7 | // Use 8 | // 9 | // gcc -o tutorial05 tutorial05.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs` 10 | // to build (assuming libavformat and libavcodec are correctly installed, 11 | // and assuming you have sdl-config. 
Please refer to SDL docs for your installation.) 12 | // 13 | // Run using 14 | // tutorial05 myvideofile.mpg 15 | // 16 | // to play the video. 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | #include 26 | 27 | #ifdef __MINGW32__ 28 | #undef main /* Prevents SDL from overriding main() */ 29 | #endif 30 | 31 | #include 32 | #include 33 | 34 | #define SDL_AUDIO_BUFFER_SIZE 1024 35 | 36 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 37 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 38 | 39 | #define AV_SYNC_THRESHOLD 0.01 40 | #define AV_NOSYNC_THRESHOLD 10.0 41 | 42 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 43 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 44 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 45 | 46 | #define VIDEO_PICTURE_QUEUE_SIZE 1 47 | 48 | typedef struct PacketQueue { 49 | AVPacketList *first_pkt, *last_pkt; 50 | int nb_packets; 51 | int size; 52 | SDL_mutex *mutex; 53 | SDL_cond *cond; 54 | } PacketQueue; 55 | 56 | 57 | typedef struct VideoPicture { 58 | SDL_Overlay *bmp; 59 | int width, height; /* source height & width */ 60 | int allocated; 61 | double pts; 62 | } VideoPicture; 63 | 64 | typedef struct VideoState { 65 | 66 | AVFormatContext *pFormatCtx; 67 | int videoStream, audioStream; 68 | 69 | double audio_clock; 70 | AVStream *audio_st; 71 | PacketQueue audioq; 72 | uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]; 73 | unsigned int audio_buf_size; 74 | unsigned int audio_buf_index; 75 | AVPacket audio_pkt; 76 | uint8_t *audio_pkt_data; 77 | int audio_pkt_size; 78 | int audio_hw_buf_size; 79 | double frame_timer; 80 | double frame_last_pts; 81 | double frame_last_delay; 82 | double video_clock; ///mutex = SDL_CreateMutex(); 107 | q->cond = SDL_CreateCond(); 108 | } 109 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 110 | 111 | AVPacketList *pkt1; 112 | if(av_dup_packet(pkt) < 0) { 113 | return -1; 114 | } 115 | pkt1 = av_malloc(sizeof(AVPacketList)); 116 | if (!pkt1) 117 | return -1; 
118 | pkt1->pkt = *pkt; 119 | pkt1->next = NULL; 120 | 121 | SDL_LockMutex(q->mutex); 122 | 123 | if (!q->last_pkt) 124 | q->first_pkt = pkt1; 125 | else 126 | q->last_pkt->next = pkt1; 127 | q->last_pkt = pkt1; 128 | q->nb_packets++; 129 | q->size += pkt1->pkt.size; 130 | SDL_CondSignal(q->cond); 131 | 132 | SDL_UnlockMutex(q->mutex); 133 | return 0; 134 | } 135 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 136 | { 137 | AVPacketList *pkt1; 138 | int ret; 139 | 140 | SDL_LockMutex(q->mutex); 141 | 142 | for(;;) { 143 | 144 | if(global_video_state->quit) { 145 | ret = -1; 146 | break; 147 | } 148 | 149 | pkt1 = q->first_pkt; 150 | if (pkt1) { 151 | q->first_pkt = pkt1->next; 152 | if (!q->first_pkt) 153 | q->last_pkt = NULL; 154 | q->nb_packets--; 155 | q->size -= pkt1->pkt.size; 156 | *pkt = pkt1->pkt; 157 | av_free(pkt1); 158 | ret = 1; 159 | break; 160 | } else if (!block) { 161 | ret = 0; 162 | break; 163 | } else { 164 | SDL_CondWait(q->cond, q->mutex); 165 | } 166 | } 167 | SDL_UnlockMutex(q->mutex); 168 | return ret; 169 | } 170 | double get_audio_clock(VideoState *is) { 171 | double pts; 172 | int hw_buf_size, bytes_per_sec, n; 173 | 174 | pts = is->audio_clock; /* maintained in the audio thread */ 175 | hw_buf_size = is->audio_buf_size - is->audio_buf_index; 176 | bytes_per_sec = 0; 177 | n = is->audio_st->codec->channels * 2; 178 | if(is->audio_st) { 179 | bytes_per_sec = is->audio_st->codec->sample_rate * n; 180 | } 181 | if(bytes_per_sec) { 182 | pts -= (double)hw_buf_size / bytes_per_sec; 183 | } 184 | return pts; 185 | } 186 | 187 | int audio_decode_frame(VideoState *vidState, uint8_t *audio_buf, int buf_size, double *pts_ptr) { 188 | 189 | int len1, data_size, n; 190 | AVPacket *pkt = &vidState->audio_pkt; 191 | static AVPacket pktTemp; 192 | double pts; 193 | static AVFrame *decoded_aframe; 194 | 195 | for(;;) { 196 | while(vidState->audio_pkt_size > 0) { 197 | int got_frame = 0; 198 | if (!decoded_aframe) { 199 | if 
(!(decoded_aframe = avcodec_alloc_frame())) { 200 | fprintf(stderr, "out of memory\n"); 201 | exit(1); 202 | } 203 | } else 204 | avcodec_get_frame_defaults(decoded_aframe); 205 | 206 | data_size = buf_size; 207 | // len1 = avcodec_decode_audio2(is->audio_st->codec, 208 | // (int16_t *)audio_buf, &data_size, 209 | // is->audio_pkt_data, is->audio_pkt_size); 210 | len1 = avcodec_decode_audio4(vidState->audio_st->codec, decoded_aframe, &got_frame, &pktTemp); 211 | 212 | if(len1 < 0) { 213 | /* if error, skip frame */ 214 | vidState->audio_pkt_size = 0; 215 | break; 216 | } 217 | 218 | if (got_frame) { 219 | //printf("\nGot frame!"); 220 | //printf("\nFrame data size: %d", sizeof(decoded_aframe->data[0])); 221 | data_size = av_samples_get_buffer_size(NULL, vidState->audio_st->codec->channels, 222 | decoded_aframe->nb_samples, 223 | vidState->audio_st->codec->sample_fmt, 1); 224 | if (data_size > buf_size) { 225 | data_size = buf_size; 226 | } 227 | memcpy(audio_buf, decoded_aframe->data[0], data_size); 228 | 229 | }else{ 230 | data_size = 0; 231 | } 232 | pktTemp.data += len1; 233 | pktTemp.size -= len1; 234 | vidState->audio_pkt_data += len1; 235 | vidState->audio_pkt_size -= len1; 236 | if(data_size <= 0) { 237 | /* No data yet, get more frames */ 238 | continue; 239 | } 240 | 241 | pts = vidState->audio_clock; 242 | *pts_ptr = pts; 243 | n = 2 * vidState->audio_st->codec->channels; // number of bit per-sample (all channels) 244 | vidState->audio_clock += (double)data_size / 245 | (double)(n * vidState->audio_st->codec->sample_rate); 246 | 247 | /* We have data, return it and come back for more later */ 248 | return data_size; 249 | } 250 | if(pkt->data) 251 | av_free_packet(pkt); 252 | 253 | if(vidState->quit) { 254 | return -1; 255 | } 256 | /* next packet */ 257 | if(packet_queue_get(&vidState->audioq, pkt, 1) < 0) { 258 | return -1; 259 | } 260 | pktTemp.data = pkt->data; 261 | pktTemp.size = pkt->size; 262 | vidState->audio_pkt_data = pkt->data; 263 | 
vidState->audio_pkt_size = pkt->size; 264 | /* if update, update the audio clock w/pts */ 265 | if(pkt->pts != AV_NOPTS_VALUE) { 266 | vidState->audio_clock = av_q2d(vidState->audio_st->time_base)*pkt->pts; 267 | } 268 | 269 | } 270 | } 271 | 272 | void audio_callback(void *userdata, Uint8 *stream, int len) { 273 | 274 | VideoState *is = (VideoState *)userdata; 275 | int len1, audio_size; 276 | double pts; 277 | 278 | while(len > 0) { 279 | if(is->audio_buf_index >= is->audio_buf_size) { 280 | /* We have already sent all our data; get more */ 281 | audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts); 282 | if(audio_size < 0) { 283 | /* If error, output silence */ 284 | is->audio_buf_size = 1024; 285 | memset(is->audio_buf, 0, is->audio_buf_size); 286 | } else { 287 | is->audio_buf_size = audio_size; 288 | } 289 | is->audio_buf_index = 0; 290 | } 291 | len1 = is->audio_buf_size - is->audio_buf_index; 292 | if(len1 > len) 293 | len1 = len; 294 | memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); 295 | len -= len1; 296 | stream += len1; 297 | is->audio_buf_index += len1; 298 | } 299 | } 300 | 301 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 302 | SDL_Event event; 303 | event.type = FF_REFRESH_EVENT; 304 | event.user.data1 = opaque; 305 | SDL_PushEvent(&event); 306 | return 0; /* 0 means stop timer */ 307 | } 308 | 309 | /* schedule a video refresh in 'delay' ms */ 310 | static void schedule_refresh(VideoState *is, int delay) { 311 | SDL_AddTimer(delay, sdl_refresh_timer_cb, is); 312 | } 313 | 314 | void video_display(VideoState *vidState) { 315 | 316 | SDL_Rect rect; 317 | VideoPicture *vp; 318 | // AVPicture pict; 319 | float aspect_ratio; 320 | int w, h, x, y; 321 | // int i; 322 | 323 | vp = &vidState->pictq[vidState->pictq_rindex]; 324 | if(vp->bmp) { 325 | if(vidState->video_st->codec->sample_aspect_ratio.num == 0) { 326 | aspect_ratio = 0; 327 | } else { 328 | aspect_ratio = 
av_q2d(vidState->video_st->codec->sample_aspect_ratio) * 329 | vidState->video_st->codec->width / vidState->video_st->codec->height; 330 | } 331 | if(aspect_ratio <= 0.0) { 332 | aspect_ratio = (float)vidState->video_st->codec->width / 333 | (float)vidState->video_st->codec->height; 334 | } 335 | h = screen->h; 336 | w = ((int)rint(h * aspect_ratio)) & -3; 337 | if(w > screen->w) { 338 | w = screen->w; 339 | h = ((int)rint(w / aspect_ratio)) & -3; 340 | } 341 | x = (screen->w - w) / 2; 342 | y = (screen->h - h) / 2; 343 | 344 | rect.x = x; 345 | rect.y = y; 346 | rect.w = w; 347 | rect.h = h; 348 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 349 | } 350 | } 351 | 352 | void video_refresh_timer(void *userdata) { 353 | 354 | VideoState *vidState = (VideoState *)userdata; 355 | VideoPicture *vp; 356 | double actual_delay, delay, sync_threshold, ref_clock, diff; 357 | 358 | if(vidState->video_st) { 359 | if(vidState->pictq_size == 0) { 360 | schedule_refresh(vidState, 1); 361 | } else { 362 | vp = &vidState->pictq[vidState->pictq_rindex]; 363 | 364 | 365 | delay = vp->pts - vidState->frame_last_pts; /* the pts from last time */ 366 | if(delay <= 0 || delay >= 1.0) { 367 | /** if incorrect delay, use previous one */ 368 | delay = vidState->frame_last_delay; 369 | } 370 | /** save for next time */ 371 | vidState->frame_last_delay = delay; 372 | vidState->frame_last_pts = vp->pts; 373 | 374 | /* update delay to sync to audio */ 375 | ref_clock = get_audio_clock(vidState); 376 | diff = vp->pts - ref_clock; 377 | 378 | /** Skip or repeat the frame. Take delay into account 379 | FFPlay still doesn't "know if this is the best guess." */ 380 | /// Make the greater gap to 381 | sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD; 382 | 383 | if(fabs(diff) < AV_NOSYNC_THRESHOLD) { 384 | if(diff <= -sync_threshold) { // video is behind? 
385 | delay = 0; 386 | } else if(diff >= sync_threshold) { // video is ahead 387 | delay = 2 * delay; 388 | } 389 | } 390 | vidState->frame_timer += delay; 391 | /* computer the REAL delay */ 392 | actual_delay = vidState->frame_timer - (av_gettime() / 1000000.0); 393 | if(actual_delay < 0.010) { 394 | /* Really it should skip the picture instead */ 395 | actual_delay = 0.010; 396 | } 397 | 398 | 399 | schedule_refresh(vidState, (int)(actual_delay * 1000 + 0.5)); 400 | /* show the picture! */ 401 | video_display(vidState); 402 | 403 | /* update queue for next picture! */ 404 | if(++vidState->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 405 | vidState->pictq_rindex = 0; 406 | } 407 | SDL_LockMutex(vidState->pictq_mutex); 408 | vidState->pictq_size--; 409 | SDL_CondSignal(vidState->pictq_cond); 410 | SDL_UnlockMutex(vidState->pictq_mutex); 411 | } 412 | } else { 413 | schedule_refresh(vidState, 100); 414 | } 415 | } 416 | 417 | void alloc_picture(void *userdata) { 418 | 419 | VideoState *is = (VideoState *)userdata; 420 | VideoPicture *vp; 421 | 422 | vp = &is->pictq[is->pictq_windex]; 423 | if(vp->bmp) { 424 | // we already have one make another, bigger/smaller 425 | SDL_FreeYUVOverlay(vp->bmp); 426 | } 427 | // Allocate a place to put our YUV image on that screen 428 | vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, 429 | is->video_st->codec->height, 430 | SDL_YV12_OVERLAY, 431 | screen); 432 | vp->width = is->video_st->codec->width; 433 | vp->height = is->video_st->codec->height; 434 | 435 | SDL_LockMutex(is->pictq_mutex); 436 | vp->allocated = 1; 437 | SDL_CondSignal(is->pictq_cond); 438 | SDL_UnlockMutex(is->pictq_mutex); 439 | 440 | } 441 | 442 | int queue_picture(VideoState *vidState, AVFrame *pFrame, double pts) { 443 | 444 | VideoPicture *vp; 445 | int dst_pix_fmt; 446 | AVPicture pict; 447 | 448 | /* wait until we have space for a new pic */ 449 | SDL_LockMutex(vidState->pictq_mutex); 450 | while(vidState->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 
451 | !vidState->quit) { 452 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 453 | } 454 | SDL_UnlockMutex(vidState->pictq_mutex); 455 | 456 | if(vidState->quit) 457 | return -1; 458 | 459 | // windex is set to 0 initially 460 | vp = &vidState->pictq[vidState->pictq_windex]; 461 | 462 | /* allocate or resize the buffer! */ 463 | if(!vp->bmp || 464 | vp->width != vidState->video_st->codec->width || 465 | vp->height != vidState->video_st->codec->height) { 466 | SDL_Event event; 467 | 468 | vp->allocated = 0; 469 | /* we have to do it in the main thread */ 470 | event.type = FF_ALLOC_EVENT; 471 | event.user.data1 = vidState; 472 | SDL_PushEvent(&event); 473 | 474 | /* wait until we have a picture allocated */ 475 | SDL_LockMutex(vidState->pictq_mutex); 476 | while(!vp->allocated && !vidState->quit) { 477 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 478 | } 479 | SDL_UnlockMutex(vidState->pictq_mutex); 480 | if(vidState->quit) { 481 | return -1; 482 | } 483 | } 484 | /* We have a place to put our picture on the queue */ 485 | /* If we are skipping a frame, do we set this to null 486 | but still return vp->allocated = 1? 
*/ 487 | 488 | 489 | if(vp->bmp) { 490 | 491 | SDL_LockYUVOverlay(vp->bmp); 492 | 493 | dst_pix_fmt = PIX_FMT_YUV420P; 494 | /* point pict at the queue */ 495 | 496 | pict.data[0] = vp->bmp->pixels[0]; 497 | pict.data[1] = vp->bmp->pixels[2]; 498 | pict.data[2] = vp->bmp->pixels[1]; 499 | 500 | pict.linesize[0] = vp->bmp->pitches[0]; 501 | pict.linesize[1] = vp->bmp->pitches[2]; 502 | pict.linesize[2] = vp->bmp->pitches[1]; 503 | 504 | // Convert the image into YUV format that SDL uses 505 | // img_convert(&pict, dst_pix_fmt, 506 | // (AVPicture *)pFrame, is->video_st->codec->pix_fmt, 507 | // is->video_st->codec->width, is->video_st->codec->height); 508 | 509 | static struct SwsContext *img_convert_ctx; 510 | int w = vidState->video_st->codec->width; 511 | int h = vidState->video_st->codec->height; 512 | if (!img_convert_ctx) { 513 | img_convert_ctx = sws_getContext(w, h, vidState->video_st->codec->pix_fmt, 514 | w, h, dst_pix_fmt, 515 | SWS_X, NULL, NULL, NULL); 516 | } 517 | 518 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 519 | pFrame->linesize, 0, h, 520 | pict.data, pict.linesize); 521 | SDL_UnlockYUVOverlay(vp->bmp); 522 | vp->pts = pts; 523 | 524 | /* now we inform our display thread that we have a pic ready */ 525 | if(++vidState->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 526 | vidState->pictq_windex = 0; 527 | } 528 | SDL_LockMutex(vidState->pictq_mutex); 529 | vidState->pictq_size++; 530 | SDL_UnlockMutex(vidState->pictq_mutex); 531 | } 532 | return 0; 533 | } 534 | 535 | double synchronize_video(VideoState *vidState, AVFrame *src_frame, double pts) { 536 | 537 | double frame_delay; 538 | 539 | if(pts != 0) { 540 | /* if we have pts, set video clock to it */ 541 | vidState->video_clock = pts; 542 | } else { 543 | /* if we aren't given a pts, set it to the clock */ 544 | pts = vidState->video_clock; 545 | } 546 | /// update the video clock, if we are repeating a frame, adjust clock accordingly 547 | frame_delay = 
av_q2d(vidState->video_st->codec->time_base); 548 | frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); // ???:why *0.5 549 | vidState->video_clock += frame_delay; 550 | 551 | return pts; 552 | } 553 | uint64_t global_video_pkt_pts = AV_NOPTS_VALUE; 554 | 555 | /* These are called whenever we allocate a frame 556 | * buffer. We use this to store the global_pts in 557 | * a frame at the time it is allocated. 558 | */ 559 | int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) { 560 | int ret = avcodec_default_get_buffer(c, pic); 561 | uint64_t *pts = av_malloc(sizeof(uint64_t)); 562 | //printf("\t1st pkt's pts: %lld", global_video_pkt_pts); 563 | *pts = global_video_pkt_pts; 564 | pic->opaque = pts; 565 | return ret; 566 | } 567 | void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { 568 | if(pic) av_freep(&pic->opaque); 569 | avcodec_default_release_buffer(c, pic); 570 | } 571 | 572 | int video_thread(void *arg) { 573 | VideoState *vidState = (VideoState *)arg; 574 | AVPacket pkt1, *packet = &pkt1; 575 | //int len1; 576 | int frameFinished; 577 | AVFrame *pFrame; 578 | double pts; 579 | 580 | pFrame = avcodec_alloc_frame(); 581 | 582 | for(;;) { 583 | if(packet_queue_get(&vidState->videoq, packet, 1) < 0) { 584 | // means we quit getting packets 585 | break; 586 | } 587 | pts = 0; 588 | 589 | /// Save global pts to be stored in pFrame in first call 590 | /** 591 | (?) How to know it is the first call? 592 | (@) It actually be called everytime a new packet read, but 593 | the our_get_buffer is called only when a new frame is about 594 | to be created, it meant only first packet's pts is used. 
595 | */ 596 | 597 | //printf("\nPkt's dts: %lld", packet->dts); 598 | global_video_pkt_pts = packet->pts; 599 | 600 | // Decode video frame 601 | // len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished, packet->data, packet->size); -- Deprecated 602 | avcodec_decode_video2(vidState->video_st->codec, pFrame, &frameFinished, packet); 603 | 604 | /// Check custom pts value 605 | if (packet->dts == AV_NOPTS_VALUE 606 | && pFrame->opaque 607 | && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { 608 | pts = *(uint64_t *)pFrame->opaque; 609 | } else if(packet->dts != AV_NOPTS_VALUE) { 610 | pts = packet->dts; 611 | 612 | } else { 613 | pts = 0; 614 | } 615 | pts *= av_q2d(vidState->video_st->time_base); 616 | 617 | // Did we get a video frame? 618 | if(frameFinished) { 619 | pts = synchronize_video(vidState, pFrame, pts); 620 | if(queue_picture(vidState, pFrame, pts) < 0) { 621 | break; 622 | } 623 | } 624 | av_free_packet(packet); 625 | } 626 | 627 | 628 | av_free(pFrame); 629 | return 0; 630 | } 631 | 632 | int stream_component_open(VideoState *is, int stream_index) { 633 | 634 | AVFormatContext *pFormatCtx = is->pFormatCtx; 635 | AVCodecContext *codecCtx; 636 | AVCodec *codec; 637 | SDL_AudioSpec wanted_spec, spec; 638 | 639 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 640 | return -1; 641 | } 642 | 643 | // Get a pointer to the codec context for the video stream 644 | codecCtx = pFormatCtx->streams[stream_index]->codec; 645 | 646 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 647 | // Set audio settings from codec info 648 | wanted_spec.freq = codecCtx->sample_rate; 649 | wanted_spec.format = AUDIO_S16SYS; 650 | wanted_spec.channels = codecCtx->channels; 651 | wanted_spec.silence = 0; 652 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 653 | wanted_spec.callback = audio_callback; 654 | wanted_spec.userdata = is; 655 | 656 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 657 | fprintf(stderr, "SDL_OpenAudio: %s\n", 
SDL_GetError()); 658 | return -1; 659 | } 660 | is->audio_hw_buf_size = spec.size; 661 | } 662 | codec = avcodec_find_decoder(codecCtx->codec_id); 663 | 664 | if(!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) { 665 | fprintf(stderr, "Unsupported codec!\n"); 666 | return -1; 667 | } 668 | 669 | switch(codecCtx->codec_type) { 670 | case AVMEDIA_TYPE_AUDIO: 671 | is->audioStream = stream_index; 672 | is->audio_st = pFormatCtx->streams[stream_index]; 673 | is->audio_buf_size = 0; 674 | is->audio_buf_index = 0; 675 | memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); 676 | packet_queue_init(&is->audioq); 677 | SDL_PauseAudio(0); 678 | break; 679 | case AVMEDIA_TYPE_VIDEO: 680 | is->videoStream = stream_index; 681 | is->video_st = pFormatCtx->streams[stream_index]; 682 | 683 | is->frame_timer = (double)av_gettime() / 1000000.0; 684 | is->frame_last_delay = 40e-3; 685 | 686 | packet_queue_init(&is->videoq); 687 | is->video_tid = SDL_CreateThread(video_thread, is); 688 | 689 | /** 690 | Add function to customize the memory allocating 691 | for a frame 692 | */ 693 | codecCtx->get_buffer = our_get_buffer; 694 | codecCtx->release_buffer = our_release_buffer; 695 | break; 696 | default: 697 | break; 698 | } 699 | } 700 | 701 | int decode_interrupt_cb(void * ctx) { 702 | return (global_video_state && global_video_state->quit); 703 | } 704 | 705 | const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; 706 | 707 | int decode_thread(void *arg) { 708 | 709 | VideoState *vidState = (VideoState *)arg; 710 | AVFormatContext *pFormatCtx; 711 | AVPacket pkt1, *packet = &pkt1; 712 | 713 | int video_index = -1; 714 | int audio_index = -1; 715 | int i; 716 | 717 | vidState->videoStream=-1; 718 | vidState->audioStream=-1; 719 | 720 | global_video_state = vidState; 721 | // will interrupt blocking functions if we quit! 
722 | // url_set_interrupt_cb(decode_interrupt_cb); 723 | // if(av_open_input_file(&pFormatCtx, vidState->filename, NULL, 0, NULL)!=0) 724 | // return -1; // Couldn't open file 725 | 726 | // will interrupt blocking functions if we quit! --> Deprecated, usind AVIOInterruptCB instead 727 | // url_set_interrupt_cb(decode_interrupt_cb); 728 | // avio_set_interrupt_cb(decode_interrupt_cb); 729 | 730 | pFormatCtx = avformat_alloc_context(); 731 | pFormatCtx->interrupt_callback = int_cb; 732 | 733 | if (avio_open2(&pFormatCtx->pb, vidState->filename, AVIO_FLAG_READ, &pFormatCtx->interrupt_callback, NULL)) 734 | return -1; 735 | 736 | // Open video file 737 | if (avformat_open_input(&pFormatCtx, vidState->filename, NULL, NULL)!=0) 738 | return -1; // Couldn't open file 739 | 740 | 741 | 742 | vidState->pFormatCtx = pFormatCtx; 743 | 744 | // Retrieve stream information 745 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 746 | return -1; // Couldn't find stream information 747 | 748 | // Dump information about file onto standard error 749 | av_dump_format(pFormatCtx, 0, vidState->filename, 0); 750 | 751 | // Find the first video stream 752 | 753 | for(i=0; inb_streams; i++) { 754 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 755 | video_index < 0) { 756 | video_index=i; 757 | } 758 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 759 | audio_index < 0) { 760 | audio_index=i; 761 | } 762 | } 763 | if(audio_index >= 0) { 764 | stream_component_open(vidState, audio_index); 765 | } 766 | if(video_index >= 0) { 767 | stream_component_open(vidState, video_index); 768 | } 769 | 770 | if(vidState->videoStream < 0 || vidState->audioStream < 0) { 771 | fprintf(stderr, "%s: could not open codecs\n", vidState->filename); 772 | goto fail; 773 | } 774 | 775 | // main decode loop 776 | 777 | for(;;) { 778 | if(vidState->quit) { 779 | break; 780 | } 781 | // seek stuff goes here 782 | if(vidState->audioq.size > MAX_AUDIOQ_SIZE || 783 | 
vidState->videoq.size > MAX_VIDEOQ_SIZE) { 784 | SDL_Delay(10); 785 | continue; 786 | } 787 | if(av_read_frame(vidState->pFormatCtx, packet) < 0) { 788 | //if(url_ferror(&pFormatCtx->pb) == 0) 789 | if(&pFormatCtx->pb && &pFormatCtx->pb->error) 790 | { 791 | SDL_Delay(100); /* no error; wait for user input */ 792 | continue; 793 | } else { 794 | break; 795 | } 796 | } 797 | // Is this a packet from the video stream? 798 | if(packet->stream_index == vidState->videoStream) { 799 | packet_queue_put(&vidState->videoq, packet); 800 | } else if(packet->stream_index == vidState->audioStream) { 801 | packet_queue_put(&vidState->audioq, packet); 802 | } else { 803 | av_free_packet(packet); 804 | } 805 | } 806 | /* all done - wait for it */ 807 | while(!vidState->quit) { 808 | SDL_Delay(100); 809 | } 810 | 811 | fail: 812 | { 813 | SDL_Event event; 814 | event.type = FF_QUIT_EVENT; 815 | event.user.data1 = vidState; 816 | SDL_PushEvent(&event); 817 | } 818 | return 0; 819 | } 820 | 821 | int main(int argc, char *argv[]) { 822 | 823 | SDL_Event event; 824 | 825 | VideoState *vidState; 826 | 827 | vidState = av_mallocz(sizeof(VideoState)); 828 | 829 | if(argc < 2) { 830 | fprintf(stderr, "Usage: test \n"); 831 | exit(1); 832 | } 833 | // Register all formats and codecs 834 | av_register_all(); 835 | 836 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 837 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 838 | exit(1); 839 | } 840 | 841 | // Make a screen to put our video 842 | #ifndef __DARWIN__ 843 | screen = SDL_SetVideoMode(640, 480, 0, 0); 844 | #else 845 | screen = SDL_SetVideoMode(640, 480, 24, 0); 846 | #endif 847 | if(!screen) { 848 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 849 | exit(1); 850 | } 851 | 852 | av_strlcpy(vidState->filename, argv[1], sizeof(vidState->filename)); 853 | 854 | vidState->pictq_mutex = SDL_CreateMutex(); 855 | vidState->pictq_cond = SDL_CreateCond(); 856 | 857 | 
schedule_refresh(vidState, 40); 858 | 859 | vidState->parse_tid = SDL_CreateThread(decode_thread, vidState); 860 | if(!vidState->parse_tid) { 861 | av_free(vidState); 862 | return -1; 863 | } 864 | for(;;) { 865 | 866 | SDL_WaitEvent(&event); 867 | switch(event.type) { 868 | case FF_QUIT_EVENT: 869 | case SDL_QUIT: 870 | vidState->quit = 1; 871 | SDL_Quit(); 872 | exit(0); 873 | break; 874 | case FF_ALLOC_EVENT: 875 | alloc_picture(event.user.data1); 876 | break; 877 | case FF_REFRESH_EVENT: 878 | video_refresh_timer(event.user.data1); 879 | break; 880 | default: 881 | break; 882 | } 883 | } 884 | return 0; 885 | 886 | } 887 | -------------------------------------------------------------------------------- /tutorial06.c: -------------------------------------------------------------------------------- 1 | // tutorial06.c 2 | // A pedagogical video player that really works! 3 | // 4 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 5 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 6 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 7 | // Use 8 | // 9 | // gcc -o tutorial02 tutorial02.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs` 10 | // to build (assuming libavformat and libavcodec are correctly installed, 11 | // and assuming you have sdl-config. Please refer to SDL docs for your installation.) 12 | // 13 | // Run using 14 | // tutorial06 myvideofile.mpg 15 | // 16 | // to play the video. 
17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | #include 26 | 27 | #ifdef __MINGW32__ 28 | #undef main /* Prevents SDL from overriding main() */ 29 | #endif 30 | 31 | #include 32 | #include 33 | 34 | #define SDL_AUDIO_BUFFER_SIZE 1024 35 | 36 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 37 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 38 | 39 | #define AV_SYNC_THRESHOLD 0.01 40 | #define AV_NOSYNC_THRESHOLD 10.0 41 | 42 | #define SAMPLE_CORRECTION_PERCENT_MAX 10 43 | #define AUDIO_DIFF_AVG_NB 20 44 | 45 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 46 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 47 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 48 | 49 | #define VIDEO_PICTURE_QUEUE_SIZE 1 50 | 51 | #define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER 52 | 53 | typedef struct PacketQueue { 54 | AVPacketList *first_pkt, *last_pkt; 55 | int nb_packets; 56 | int size; 57 | SDL_mutex *mutex; 58 | SDL_cond *cond; 59 | } PacketQueue; 60 | 61 | 62 | typedef struct VideoPicture { 63 | SDL_Overlay *bmp; 64 | int width, height; /* source height & width */ 65 | int allocated; 66 | double pts; 67 | } VideoPicture; 68 | 69 | typedef struct VideoState { 70 | 71 | AVFormatContext *pFormatCtx; 72 | int videoStream, audioStream; 73 | 74 | int av_sync_type; 75 | double external_clock; /* external clock base */ 76 | int64_t external_clock_time; 77 | 78 | double audio_clock; 79 | AVStream *audio_st; 80 | PacketQueue audioq; 81 | uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]; 82 | unsigned int audio_buf_size; 83 | unsigned int audio_buf_index; 84 | AVPacket audio_pkt; 85 | uint8_t *audio_pkt_data; 86 | int audio_pkt_size; 87 | int audio_hw_buf_size; 88 | double audio_diff_cum; /* used for AV difference average computation */ 89 | double audio_diff_avg_coef; 90 | double audio_diff_threshold; 91 | int audio_diff_avg_count; 92 | double frame_timer; 93 | double frame_last_pts; 94 | double frame_last_delay; 95 | double video_clock; ///mutex = 
SDL_CreateMutex(); 128 | q->cond = SDL_CreateCond(); 129 | } 130 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 131 | 132 | AVPacketList *pkt1; 133 | if(av_dup_packet(pkt) < 0) { 134 | return -1; 135 | } 136 | pkt1 = av_malloc(sizeof(AVPacketList)); 137 | if (!pkt1) 138 | return -1; 139 | pkt1->pkt = *pkt; 140 | pkt1->next = NULL; 141 | 142 | SDL_LockMutex(q->mutex); 143 | 144 | if (!q->last_pkt) 145 | q->first_pkt = pkt1; 146 | else 147 | q->last_pkt->next = pkt1; 148 | q->last_pkt = pkt1; 149 | q->nb_packets++; 150 | q->size += pkt1->pkt.size; 151 | SDL_CondSignal(q->cond); 152 | 153 | SDL_UnlockMutex(q->mutex); 154 | return 0; 155 | } 156 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 157 | { 158 | AVPacketList *pkt1; 159 | int ret; 160 | 161 | SDL_LockMutex(q->mutex); 162 | 163 | for(;;) { 164 | 165 | if(global_video_state->quit) { 166 | ret = -1; 167 | break; 168 | } 169 | 170 | pkt1 = q->first_pkt; 171 | if (pkt1) { 172 | q->first_pkt = pkt1->next; 173 | if (!q->first_pkt) 174 | q->last_pkt = NULL; 175 | q->nb_packets--; 176 | q->size -= pkt1->pkt.size; 177 | *pkt = pkt1->pkt; 178 | av_free(pkt1); 179 | ret = 1; 180 | break; 181 | } else if (!block) { 182 | ret = 0; 183 | break; 184 | } else { 185 | SDL_CondWait(q->cond, q->mutex); 186 | } 187 | } 188 | SDL_UnlockMutex(q->mutex); 189 | return ret; 190 | } 191 | double get_audio_clock(VideoState *vidState) { 192 | double pts; 193 | int hw_buf_size, bytes_per_sec, n; 194 | 195 | pts = vidState->audio_clock; /* maintained in the audio thread */ 196 | hw_buf_size = vidState->audio_buf_size - vidState->audio_buf_index; 197 | bytes_per_sec = 0; 198 | n = vidState->audio_st->codec->channels * 2; 199 | if(vidState->audio_st) { 200 | bytes_per_sec = vidState->audio_st->codec->sample_rate * n; 201 | } 202 | if(bytes_per_sec) { 203 | pts -= (double)hw_buf_size / bytes_per_sec; 204 | } 205 | return pts; 206 | } 207 | double get_video_clock(VideoState *vidState) { 208 | double delta; 209 
| 210 | delta = (av_gettime() - vidState->video_current_pts_time) / 1000000.0; 211 | return vidState->video_current_pts + delta; 212 | } 213 | double get_external_clock(VideoState *vidState) { 214 | return av_gettime() / 1000000.0; 215 | } 216 | 217 | double get_master_clock(VideoState *vidState) { 218 | if(vidState->av_sync_type == AV_SYNC_VIDEO_MASTER) { 219 | return get_video_clock(vidState); 220 | } else if(vidState->av_sync_type == AV_SYNC_AUDIO_MASTER) { 221 | return get_audio_clock(vidState); 222 | } else { 223 | return get_external_clock(vidState); 224 | } 225 | } 226 | /* Add or subtract samples to get a better sync, return new 227 | audio buffer size */ 228 | int synchronize_audio(VideoState *vidState, short *samples, 229 | int samples_size, double pts) { 230 | int n; 231 | double ref_clock; 232 | 233 | n = 2 * vidState->audio_st->codec->channels; 234 | 235 | if(vidState->av_sync_type != AV_SYNC_AUDIO_MASTER) { 236 | double diff, avg_diff; 237 | int wanted_size, min_size, max_size, nb_samples; 238 | 239 | ref_clock = get_master_clock(vidState); 240 | diff = get_audio_clock(vidState) - ref_clock; 241 | 242 | if(diff < AV_NOSYNC_THRESHOLD) { 243 | // accumulate the diffs 244 | vidState->audio_diff_cum = diff + vidState->audio_diff_avg_coef 245 | * vidState->audio_diff_cum; 246 | if(vidState->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { 247 | vidState->audio_diff_avg_count++; 248 | } else { 249 | avg_diff = vidState->audio_diff_cum * (1.0 - vidState->audio_diff_avg_coef); 250 | if(fabs(avg_diff) >= vidState->audio_diff_threshold) { 251 | wanted_size = samples_size + ((int)(diff * vidState->audio_st->codec->sample_rate) * n); 252 | min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100); 253 | max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100); 254 | if(wanted_size < min_size) { 255 | wanted_size = min_size; 256 | } else if (wanted_size > max_size) { 257 | wanted_size = max_size; 258 | } 259 | if(wanted_size < samples_size) 
{ 260 | /* remove samples */ 261 | samples_size = wanted_size; 262 | } else if(wanted_size > samples_size) { 263 | uint8_t *samples_end, *q; 264 | int nb; 265 | 266 | /* add samples by copying final sample*/ 267 | nb = (samples_size - wanted_size); 268 | samples_end = (uint8_t *)samples + samples_size - n; 269 | q = samples_end + n; 270 | while(nb > 0) { 271 | memcpy(q, samples_end, n); 272 | q += n; 273 | nb -= n; 274 | } 275 | samples_size = wanted_size; 276 | } 277 | } 278 | } 279 | } else { 280 | /* difference vidState TOO big; reset diff stuff */ 281 | vidState->audio_diff_avg_count = 0; 282 | vidState->audio_diff_cum = 0; 283 | } 284 | } 285 | return samples_size; 286 | } 287 | 288 | int audio_decode_frame(VideoState *vidState, uint8_t *audio_buf, int buf_size, double *pts_ptr) { 289 | 290 | int len1, data_size, n; 291 | AVPacket *pkt = &vidState->audio_pkt; 292 | double pts; 293 | static AVFrame *decoded_aframe; 294 | static AVPacket pktTemp; 295 | 296 | for(;;) { 297 | while(vidState->audio_pkt_size > 0) { 298 | int got_frame = 0; 299 | if (!decoded_aframe) { 300 | if (!(decoded_aframe = avcodec_alloc_frame())) { 301 | fprintf(stderr, "out of memory\n"); 302 | exit(1); 303 | } 304 | } else 305 | avcodec_get_frame_defaults(decoded_aframe); 306 | 307 | data_size = buf_size; 308 | //len1 = avcodec_decode_audio2(vidState->audio_st->codec, 309 | // (int16_t *)audio_buf, &data_size, 310 | // vidState->audio_pkt_data, vidState->audio_pkt_size); 311 | len1 = avcodec_decode_audio4(vidState->audio_st->codec, decoded_aframe, &got_frame, &pktTemp); 312 | if(len1 < 0) { 313 | /* if error, skip frame */ 314 | vidState->audio_pkt_size = 0; 315 | break; 316 | } 317 | 318 | if (got_frame) { 319 | //printf("\nGot frame!"); 320 | //printf("\nFrame data size: %d", sizeof(decoded_aframe->data[0])); 321 | data_size = av_samples_get_buffer_size(NULL, vidState->audio_st->codec->channels, 322 | decoded_aframe->nb_samples, 323 | vidState->audio_st->codec->sample_fmt, 1); 324 | if 
(data_size > buf_size) { 325 | data_size = buf_size; 326 | } 327 | memcpy(audio_buf, decoded_aframe->data[0], data_size); 328 | 329 | }else{ 330 | data_size = 0; 331 | } 332 | pktTemp.data += len1; 333 | pktTemp.size -= len1; 334 | vidState->audio_pkt_data += len1; 335 | vidState->audio_pkt_size -= len1; 336 | if(data_size <= 0) { 337 | /* No data yet, get more frames */ 338 | continue; 339 | } 340 | pts = vidState->audio_clock; 341 | *pts_ptr = pts; 342 | n = 2 * vidState->audio_st->codec->channels; 343 | vidState->audio_clock += (double)data_size / 344 | (double)(n * vidState->audio_st->codec->sample_rate); 345 | 346 | /* We have data, return it and come back for more later */ 347 | return data_size; 348 | } 349 | if(pkt->data) 350 | av_free_packet(pkt); 351 | 352 | if(vidState->quit) { 353 | return -1; 354 | } 355 | /* next packet */ 356 | if(packet_queue_get(&vidState->audioq, pkt, 1) < 0) { 357 | return -1; 358 | } 359 | pktTemp.data = pkt->data; 360 | pktTemp.size = pkt->size; 361 | vidState->audio_pkt_data = pkt->data; 362 | vidState->audio_pkt_size = pkt->size; 363 | /* if update, update the audio clock w/pts */ 364 | if(pkt->pts != AV_NOPTS_VALUE) { 365 | vidState->audio_clock = av_q2d(vidState->audio_st->time_base)*pkt->pts; 366 | } 367 | } 368 | } 369 | 370 | void audio_callback(void *userdata, Uint8 *stream, int len) { 371 | 372 | VideoState *vidState = (VideoState *)userdata; 373 | int len1, audio_size; 374 | double pts; 375 | 376 | while(len > 0) { 377 | if(vidState->audio_buf_index >= vidState->audio_buf_size) { 378 | /* We have already sent all our data; get more */ 379 | audio_size = audio_decode_frame(vidState, vidState->audio_buf, sizeof(vidState->audio_buf), &pts); 380 | if(audio_size < 0) { 381 | /* If error, output silence */ 382 | vidState->audio_buf_size = 1024; 383 | memset(vidState->audio_buf, 0, vidState->audio_buf_size); 384 | } else { 385 | audio_size = synchronize_audio(vidState, (int16_t *)vidState->audio_buf, 386 | audio_size, pts); 
387 | vidState->audio_buf_size = audio_size; 388 | } 389 | vidState->audio_buf_index = 0; 390 | } 391 | len1 = vidState->audio_buf_size - vidState->audio_buf_index; 392 | if(len1 > len) 393 | len1 = len; 394 | memcpy(stream, (uint8_t *)vidState->audio_buf + vidState->audio_buf_index, len1); 395 | len -= len1; 396 | stream += len1; 397 | vidState->audio_buf_index += len1; 398 | } 399 | } 400 | 401 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 402 | SDL_Event event; 403 | event.type = FF_REFRESH_EVENT; 404 | event.user.data1 = opaque; 405 | SDL_PushEvent(&event); 406 | return 0; /* 0 means stop timer */ 407 | } 408 | 409 | /* schedule a video refresh in 'delay' ms */ 410 | static void schedule_refresh(VideoState *vidState, int delay) { 411 | SDL_AddTimer(delay, sdl_refresh_timer_cb, vidState); 412 | } 413 | 414 | void video_display(VideoState *vidState) { 415 | 416 | SDL_Rect rect; 417 | VideoPicture *vp; 418 | AVPicture pict; 419 | float aspect_ratio; 420 | int w, h, x, y; 421 | int i; 422 | 423 | vp = &vidState->pictq[vidState->pictq_rindex]; 424 | if(vp->bmp) { 425 | if(vidState->video_st->codec->sample_aspect_ratio.num == 0) { 426 | aspect_ratio = 0; 427 | } else { 428 | aspect_ratio = av_q2d(vidState->video_st->codec->sample_aspect_ratio) * 429 | vidState->video_st->codec->width / vidState->video_st->codec->height; 430 | } 431 | if(aspect_ratio <= 0.0) { 432 | aspect_ratio = (float)vidState->video_st->codec->width / 433 | (float)vidState->video_st->codec->height; 434 | } 435 | h = screen->h; 436 | w = ((int)rint(h * aspect_ratio)) & -3; 437 | if(w > screen->w) { 438 | w = screen->w; 439 | h = ((int)rint(w / aspect_ratio)) & -3; 440 | } 441 | x = (screen->w - w) / 2; 442 | y = (screen->h - h) / 2; 443 | 444 | rect.x = x; 445 | rect.y = y; 446 | rect.w = w; 447 | rect.h = h; 448 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 449 | } 450 | } 451 | 452 | void video_refresh_timer(void *userdata) { 453 | 454 | VideoState *vidState = (VideoState 
*)userdata; 455 | VideoPicture *vp; 456 | double actual_delay, delay, sync_threshold, ref_clock, diff; 457 | 458 | if(vidState->video_st) { 459 | if(vidState->pictq_size == 0) { 460 | schedule_refresh(vidState, 1); 461 | } else { 462 | vp = &vidState->pictq[vidState->pictq_rindex]; 463 | 464 | vidState->video_current_pts = vp->pts; 465 | vidState->video_current_pts_time = av_gettime(); 466 | 467 | delay = vp->pts - vidState->frame_last_pts; /* the pts from last time */ 468 | if(delay <= 0 || delay >= 1.0) { 469 | /* if incorrect delay, use previous one */ 470 | delay = vidState->frame_last_delay; 471 | } 472 | /* save for next time */ 473 | vidState->frame_last_delay = delay; 474 | vidState->frame_last_pts = vp->pts; 475 | 476 | /* update delay to sync to audio if not master source */ 477 | if(vidState->av_sync_type != AV_SYNC_VIDEO_MASTER) { 478 | ref_clock = get_master_clock(vidState); 479 | diff = vp->pts - ref_clock; 480 | 481 | /* Skip or repeat the frame. Take delay into account 482 | FFPlay still doesn't "know if this vidState the best guess." */ 483 | sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD; 484 | if(fabs(diff) < AV_NOSYNC_THRESHOLD) { 485 | if(diff <= -sync_threshold) { 486 | delay = 0; 487 | } else if(diff >= sync_threshold) { 488 | delay = 2 * delay; 489 | } 490 | } 491 | } 492 | 493 | vidState->frame_timer += delay; 494 | /* computer the REAL delay */ 495 | actual_delay = vidState->frame_timer - (av_gettime() / 1000000.0); 496 | if(actual_delay < 0.010) { 497 | /* Really it should skip the picture instead */ 498 | actual_delay = 0.010; 499 | } 500 | schedule_refresh(vidState, (int)(actual_delay * 1000 + 0.5)); 501 | 502 | /* show the picture! */ 503 | video_display(vidState); 504 | 505 | /* update queue for next picture! 
*/ 506 | if(++vidState->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 507 | vidState->pictq_rindex = 0; 508 | } 509 | SDL_LockMutex(vidState->pictq_mutex); 510 | vidState->pictq_size--; 511 | SDL_CondSignal(vidState->pictq_cond); 512 | SDL_UnlockMutex(vidState->pictq_mutex); 513 | } 514 | } else { 515 | schedule_refresh(vidState, 100); 516 | } 517 | } 518 | 519 | void alloc_picture(void *userdata) { 520 | 521 | VideoState *vidState = (VideoState *)userdata; 522 | VideoPicture *vp; 523 | 524 | vp = &vidState->pictq[vidState->pictq_windex]; 525 | if(vp->bmp) { 526 | // we already have one make another, bigger/smaller 527 | SDL_FreeYUVOverlay(vp->bmp); 528 | } 529 | // Allocate a place to put our YUV image on that screen 530 | vp->bmp = SDL_CreateYUVOverlay(vidState->video_st->codec->width, 531 | vidState->video_st->codec->height, 532 | SDL_YV12_OVERLAY, 533 | screen); 534 | vp->width = vidState->video_st->codec->width; 535 | vp->height = vidState->video_st->codec->height; 536 | 537 | SDL_LockMutex(vidState->pictq_mutex); 538 | vp->allocated = 1; 539 | SDL_CondSignal(vidState->pictq_cond); 540 | SDL_UnlockMutex(vidState->pictq_mutex); 541 | 542 | } 543 | 544 | int queue_picture(VideoState *vidState, AVFrame *pFrame, double pts) { 545 | 546 | VideoPicture *vp; 547 | int dst_pix_fmt; 548 | AVPicture pict; 549 | 550 | /* wait until we have space for a new pic */ 551 | SDL_LockMutex(vidState->pictq_mutex); 552 | while(vidState->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 553 | !vidState->quit) { 554 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 555 | } 556 | SDL_UnlockMutex(vidState->pictq_mutex); 557 | 558 | if(vidState->quit) 559 | return -1; 560 | 561 | // windex vidState set to 0 initially 562 | vp = &vidState->pictq[vidState->pictq_windex]; 563 | 564 | /* allocate or resize the buffer! 
*/ 565 | if(!vp->bmp || 566 | vp->width != vidState->video_st->codec->width || 567 | vp->height != vidState->video_st->codec->height) { 568 | SDL_Event event; 569 | 570 | vp->allocated = 0; 571 | /* we have to do it in the main thread */ 572 | event.type = FF_ALLOC_EVENT; 573 | event.user.data1 = vidState; 574 | SDL_PushEvent(&event); 575 | 576 | /* wait until we have a picture allocated */ 577 | SDL_LockMutex(vidState->pictq_mutex); 578 | while(!vp->allocated && !vidState->quit) { 579 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 580 | } 581 | SDL_UnlockMutex(vidState->pictq_mutex); 582 | if(vidState->quit) { 583 | return -1; 584 | } 585 | } 586 | /* We have a place to put our picture on the queue */ 587 | /* If we are skipping a frame, do we set this to null 588 | but still return vp->allocated = 1? */ 589 | 590 | 591 | if(vp->bmp) { 592 | 593 | SDL_LockYUVOverlay(vp->bmp); 594 | 595 | dst_pix_fmt = PIX_FMT_YUV420P; 596 | /* point pict at the queue */ 597 | 598 | pict.data[0] = vp->bmp->pixels[0]; 599 | pict.data[1] = vp->bmp->pixels[2]; 600 | pict.data[2] = vp->bmp->pixels[1]; 601 | 602 | pict.linesize[0] = vp->bmp->pitches[0]; 603 | pict.linesize[1] = vp->bmp->pitches[2]; 604 | pict.linesize[2] = vp->bmp->pitches[1]; 605 | 606 | // Convert the image into YUV format that SDL uses 607 | // Convert the image into YUV format that SDL uses 608 | // img_convert(&pict, dst_pix_fmt, 609 | // (AVPicture *)pFrame, is->video_st->codec->pix_fmt, 610 | // is->video_st->codec->width, is->video_st->codec->height); 611 | 612 | static struct SwsContext *img_convert_ctx; 613 | int w = vidState->video_st->codec->width; 614 | int h = vidState->video_st->codec->height; 615 | if(!img_convert_ctx) 616 | img_convert_ctx = sws_getContext(w, h, vidState->video_st->codec->pix_fmt, 617 | w, h, dst_pix_fmt, 618 | SWS_X, NULL, NULL, NULL); 619 | 620 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 621 | pFrame->linesize, 0, h, 622 | pict.data, 
pict.linesize); 623 | 624 | SDL_UnlockYUVOverlay(vp->bmp); 625 | vp->pts = pts; 626 | 627 | /* now we inform our display thread that we have a pic ready */ 628 | if(++vidState->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 629 | vidState->pictq_windex = 0; 630 | } 631 | SDL_LockMutex(vidState->pictq_mutex); 632 | vidState->pictq_size++; 633 | SDL_UnlockMutex(vidState->pictq_mutex); 634 | } 635 | return 0; 636 | } 637 | 638 | double synchronize_video(VideoState *vidState, AVFrame *src_frame, double pts) { 639 | 640 | double frame_delay; 641 | 642 | if(pts != 0) { 643 | /* if we have pts, set video clock to it */ 644 | vidState->video_clock = pts; 645 | } else { 646 | /* if we aren't given a pts, set it to the clock */ 647 | pts = vidState->video_clock; 648 | } 649 | /* update the video clock */ 650 | frame_delay = av_q2d(vidState->video_st->codec->time_base); 651 | /* if we are repeating a frame, adjust clock accordingly */ 652 | frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); 653 | vidState->video_clock += frame_delay; 654 | return pts; 655 | } 656 | 657 | uint64_t global_video_pkt_pts = AV_NOPTS_VALUE; 658 | 659 | /* These are called whenever we allocate a frame 660 | * buffer. We use this to store the global_pts in 661 | * a frame at the time it vidState allocated. 
662 | */ 663 | int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) { 664 | int ret = avcodec_default_get_buffer(c, pic); 665 | uint64_t *pts = av_malloc(sizeof(uint64_t)); 666 | *pts = global_video_pkt_pts; 667 | pic->opaque = pts; 668 | return ret; 669 | } 670 | void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { 671 | if(pic) av_freep(&pic->opaque); 672 | avcodec_default_release_buffer(c, pic); 673 | } 674 | 675 | int video_thread(void *arg) { 676 | VideoState *vidState = (VideoState *)arg; 677 | AVPacket pkt1, *packet = &pkt1; 678 | int len1, frameFinished; 679 | AVFrame *pFrame; 680 | double pts; 681 | 682 | pFrame = avcodec_alloc_frame(); 683 | 684 | for(;;) { 685 | if(packet_queue_get(&vidState->videoq, packet, 1) < 0) { 686 | // means we quit getting packets 687 | break; 688 | } 689 | pts = 0; 690 | 691 | // Save global pts to be stored in pFrame in first call 692 | global_video_pkt_pts = packet->pts; 693 | // Decode video frame 694 | //len1 = avcodec_decode_video(vidState->video_st->codec, pFrame, &frameFinished, packet->data, packet->size); 695 | avcodec_decode_video2(vidState->video_st->codec, pFrame, &frameFinished, packet); 696 | if(packet->dts == AV_NOPTS_VALUE 697 | && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { 698 | pts = *(uint64_t *)pFrame->opaque; 699 | } else if(packet->dts != AV_NOPTS_VALUE) { 700 | pts = packet->dts; 701 | } else { 702 | pts = 0; 703 | } 704 | pts *= av_q2d(vidState->video_st->time_base); 705 | 706 | // Did we get a video frame? 
707 | if(frameFinished) { 708 | pts = synchronize_video(vidState, pFrame, pts); 709 | if(queue_picture(vidState, pFrame, pts) < 0) { 710 | break; 711 | } 712 | } 713 | av_free_packet(packet); 714 | } 715 | av_free(pFrame); 716 | return 0; 717 | } 718 | 719 | int stream_component_open(VideoState *vidState, int stream_index) { 720 | 721 | AVFormatContext *pFormatCtx = vidState->pFormatCtx; 722 | AVCodecContext *codecCtx; 723 | AVCodec *codec; 724 | SDL_AudioSpec wanted_spec, spec; 725 | 726 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 727 | return -1; 728 | } 729 | 730 | // Get a pointer to the codec context for the video stream 731 | codecCtx = pFormatCtx->streams[stream_index]->codec; 732 | 733 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 734 | // Set audio settings from codec info 735 | wanted_spec.freq = codecCtx->sample_rate; 736 | wanted_spec.format = AUDIO_S16SYS; 737 | wanted_spec.channels = codecCtx->channels; 738 | wanted_spec.silence = 0; 739 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 740 | wanted_spec.callback = audio_callback; 741 | wanted_spec.userdata = vidState; 742 | 743 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 744 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 745 | return -1; 746 | } 747 | vidState->audio_hw_buf_size = spec.size; 748 | } 749 | codec = avcodec_find_decoder(codecCtx->codec_id); 750 | if(!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) { 751 | fprintf(stderr, "Unsupported codec!\n"); 752 | return -1; 753 | } 754 | 755 | switch(codecCtx->codec_type) { 756 | case AVMEDIA_TYPE_AUDIO: 757 | vidState->audioStream = stream_index; 758 | vidState->audio_st = pFormatCtx->streams[stream_index]; 759 | vidState->audio_buf_size = 0; 760 | vidState->audio_buf_index = 0; 761 | 762 | /* averaging filter for audio sync */ 763 | vidState->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB)); 764 | vidState->audio_diff_avg_count = 0; 765 | /* Correct audio only if larger error than this */ 766 | 
vidState->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate; 767 | 768 | memset(&vidState->audio_pkt, 0, sizeof(vidState->audio_pkt)); 769 | packet_queue_init(&vidState->audioq); 770 | SDL_PauseAudio(0); 771 | break; 772 | case AVMEDIA_TYPE_VIDEO: 773 | vidState->videoStream = stream_index; 774 | vidState->video_st = pFormatCtx->streams[stream_index]; 775 | 776 | vidState->frame_timer = (double)av_gettime() / 1000000.0; 777 | vidState->frame_last_delay = 40e-3; 778 | vidState->video_current_pts_time = av_gettime(); 779 | 780 | packet_queue_init(&vidState->videoq); 781 | vidState->video_tid = SDL_CreateThread(video_thread, vidState); 782 | codecCtx->get_buffer = our_get_buffer; 783 | codecCtx->release_buffer = our_release_buffer; 784 | break; 785 | default: 786 | break; 787 | } 788 | 789 | 790 | } 791 | 792 | int decode_interrupt_cb(void * ctx) { 793 | return (global_video_state && global_video_state->quit); 794 | } 795 | 796 | const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; 797 | 798 | int decode_thread(void *arg) { 799 | 800 | VideoState *vidState = (VideoState *)arg; 801 | AVFormatContext *pFormatCtx; 802 | AVPacket pkt1, *packet = &pkt1; 803 | 804 | int video_index = -1; 805 | int audio_index = -1; 806 | int i; 807 | 808 | vidState->videoStream=-1; 809 | vidState->audioStream=-1; 810 | 811 | global_video_state = vidState; 812 | // will interrupt blocking functions if we quit! 
813 | //url_set_interrupt_cb(decode_interrupt_cb); 814 | pFormatCtx = avformat_alloc_context(); 815 | pFormatCtx->interrupt_callback = int_cb; 816 | 817 | if (avio_open2(&pFormatCtx->pb, vidState->filename, AVIO_FLAG_READ, &pFormatCtx->interrupt_callback, NULL)) 818 | return -1; 819 | 820 | 821 | // Open video file 822 | if(avformat_open_input(&pFormatCtx, vidState->filename, NULL, NULL)!=0) 823 | return -1; // Couldn't open file 824 | 825 | vidState->pFormatCtx = pFormatCtx; 826 | 827 | // Retrieve stream information 828 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 829 | return -1; // Couldn't find stream information 830 | 831 | // Dump information about file onto standard error 832 | av_dump_format(pFormatCtx, 0, vidState->filename, 0); 833 | 834 | // Find the first video stream 835 | 836 | for(i=0; inb_streams; i++) { 837 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 838 | video_index < 0) { 839 | video_index=i; 840 | } 841 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 842 | audio_index < 0) { 843 | audio_index=i; 844 | } 845 | } 846 | if(audio_index >= 0) { 847 | stream_component_open(vidState, audio_index); 848 | } 849 | if(video_index >= 0) { 850 | stream_component_open(vidState, video_index); 851 | } 852 | 853 | if(vidState->videoStream < 0 || vidState->audioStream < 0) { 854 | fprintf(stderr, "%s: could not open codecs\n", vidState->filename); 855 | goto fail; 856 | } 857 | 858 | // main decode loop 859 | 860 | for(;;) { 861 | if(vidState->quit) { 862 | break; 863 | } 864 | // seek stuff goes here 865 | if(vidState->audioq.size > MAX_AUDIOQ_SIZE || 866 | vidState->videoq.size > MAX_VIDEOQ_SIZE) { 867 | SDL_Delay(10); 868 | continue; 869 | } 870 | if(av_read_frame(vidState->pFormatCtx, packet) < 0) { 871 | //if(url_ferror(&pFormatCtx->pb) == 0) { 872 | if(&pFormatCtx->pb && &pFormatCtx->pb->error) { 873 | SDL_Delay(100); /* no error; wait for user input */ 874 | continue; 875 | } else { 876 | break; 877 
| } 878 | } 879 | // Is this a packet from the video stream? 880 | if(packet->stream_index == vidState->videoStream) { 881 | packet_queue_put(&vidState->videoq, packet); 882 | } else if(packet->stream_index == vidState->audioStream) { 883 | packet_queue_put(&vidState->audioq, packet); 884 | } else { 885 | av_free_packet(packet); 886 | } 887 | } 888 | /* all done - wait for it */ 889 | while(!vidState->quit) { 890 | SDL_Delay(100); 891 | } 892 | 893 | fail: 894 | { 895 | SDL_Event event; 896 | event.type = FF_QUIT_EVENT; 897 | event.user.data1 = vidState; 898 | SDL_PushEvent(&event); 899 | } 900 | return 0; 901 | } 902 | 903 | int main(int argc, char *argv[]) { 904 | 905 | SDL_Event event; 906 | 907 | VideoState *vidState; 908 | 909 | vidState = av_mallocz(sizeof(VideoState)); 910 | 911 | if(argc < 2) { 912 | fprintf(stderr, "Usage: test \n"); 913 | exit(1); 914 | } 915 | // Register all formats and codecs 916 | av_register_all(); 917 | 918 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 919 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 920 | exit(1); 921 | } 922 | 923 | // Make a screen to put our video 924 | #ifndef __DARWIN__ 925 | screen = SDL_SetVideoMode(640, 480, 0, 0); 926 | #else 927 | screen = SDL_SetVideoMode(640, 480, 24, 0); 928 | #endif 929 | if(!screen) { 930 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 931 | exit(1); 932 | } 933 | 934 | av_strlcpy(vidState->filename, argv[1], sizeof(vidState->filename)); 935 | 936 | vidState->pictq_mutex = SDL_CreateMutex(); 937 | vidState->pictq_cond = SDL_CreateCond(); 938 | 939 | schedule_refresh(vidState, 40); 940 | 941 | vidState->av_sync_type = DEFAULT_AV_SYNC_TYPE; 942 | vidState->parse_tid = SDL_CreateThread(decode_thread, vidState); 943 | if(!vidState->parse_tid) { 944 | av_free(vidState); 945 | return -1; 946 | } 947 | for(;;) { 948 | 949 | SDL_WaitEvent(&event); 950 | switch(event.type) { 951 | case FF_QUIT_EVENT: 952 | case SDL_QUIT: 953 | 
vidState->quit = 1; 954 | SDL_Quit(); 955 | exit(0); 956 | break; 957 | case FF_ALLOC_EVENT: 958 | alloc_picture(event.user.data1); 959 | break; 960 | case FF_REFRESH_EVENT: 961 | video_refresh_timer(event.user.data1); 962 | break; 963 | default: 964 | break; 965 | } 966 | } 967 | return 0; 968 | 969 | } 970 | -------------------------------------------------------------------------------- /tutorial07.c: -------------------------------------------------------------------------------- 1 | // tutorial07.c 2 | // A pedagogical video player that really works! Now with seeking features. 3 | // 4 | // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, 5 | // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de) 6 | // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1 7 | // Use 8 | // 9 | // gcc -o tutorial07 tutorial07.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs` 10 | // to build (assuming libavformat and libavcodec are correctly installed, 11 | // and assuming you have sdl-config. Please refer to SDL docs for your installation.) 12 | // 13 | // Run using 14 | // tutorial07 myvideofile.mpg 15 | // 16 | // to play the video. 
17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | #include 26 | #ifdef __MINGW32__ 27 | #undef main /* Prevents SDL from overriding main() */ 28 | #endif 29 | #include 30 | #include 31 | 32 | #define SDL_AUDIO_BUFFER_SIZE 1024 33 | #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) 34 | #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) 35 | #define AV_SYNC_THRESHOLD 0.01 36 | #define AV_NOSYNC_THRESHOLD 10.0 37 | #define SAMPLE_CORRECTION_PERCENT_MAX 10 38 | #define AUDIO_DIFF_AVG_NB 20 39 | #define FF_ALLOC_EVENT (SDL_USEREVENT) 40 | #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) 41 | #define FF_QUIT_EVENT (SDL_USEREVENT + 2) 42 | #define VIDEO_PICTURE_QUEUE_SIZE 1 43 | #define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER 44 | 45 | typedef struct PacketQueue { 46 | AVPacketList *first_pkt, *last_pkt; 47 | int nb_packets; 48 | int size; 49 | SDL_mutex *mutex; 50 | SDL_cond *cond; 51 | } PacketQueue; 52 | typedef struct VideoPicture { 53 | SDL_Overlay *bmp; 54 | int width, height; /* source height & width */ 55 | int allocated; 56 | double pts; 57 | } VideoPicture; 58 | 59 | typedef struct VideoState { 60 | AVFormatContext *pFormatCtx; 61 | int videoStream, audioStream; 62 | 63 | int av_sync_type; 64 | double external_clock; /* external clock base */ 65 | int64_t external_clock_time; 66 | int seek_req; 67 | int seek_flags; 68 | int64_t seek_pos; 69 | 70 | double audio_clock; 71 | AVStream *audio_st; 72 | PacketQueue audioq; 73 | uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]; 74 | unsigned int audio_buf_size; 75 | unsigned int audio_buf_index; 76 | AVPacket audio_pkt; 77 | uint8_t *audio_pkt_data; 78 | int audio_pkt_size; 79 | int audio_hw_buf_size; 80 | double audio_diff_cum; /* used for AV difference average computation */ 81 | double audio_diff_avg_coef; 82 | double audio_diff_threshold; 83 | int audio_diff_avg_count; 84 | double frame_timer; 85 | double frame_last_pts; 86 | double frame_last_delay; 87 | double video_clock; 
///mutex = SDL_CreateMutex(); 119 | q->cond = SDL_CreateCond(); 120 | } 121 | int packet_queue_put(PacketQueue *q, AVPacket *pkt) { 122 | 123 | AVPacketList *pkt1; 124 | if(pkt != &flush_pkt && av_dup_packet(pkt) < 0) { 125 | return -1; 126 | } 127 | pkt1 = av_malloc(sizeof(AVPacketList)); 128 | if (!pkt1) 129 | return -1; 130 | pkt1->pkt = *pkt; 131 | pkt1->next = NULL; 132 | 133 | SDL_LockMutex(q->mutex); 134 | 135 | if (!q->last_pkt) 136 | q->first_pkt = pkt1; 137 | else 138 | q->last_pkt->next = pkt1; 139 | q->last_pkt = pkt1; 140 | q->nb_packets++; 141 | q->size += pkt1->pkt.size; 142 | SDL_CondSignal(q->cond); 143 | 144 | SDL_UnlockMutex(q->mutex); 145 | return 0; 146 | } 147 | static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 148 | { 149 | AVPacketList *pkt1; 150 | int ret; 151 | 152 | SDL_LockMutex(q->mutex); 153 | 154 | for(;;) { 155 | 156 | if(global_video_state->quit) { 157 | ret = -1; 158 | break; 159 | } 160 | 161 | pkt1 = q->first_pkt; 162 | if (pkt1) { 163 | q->first_pkt = pkt1->next; 164 | if (!q->first_pkt) 165 | q->last_pkt = NULL; 166 | q->nb_packets--; 167 | q->size -= pkt1->pkt.size; 168 | *pkt = pkt1->pkt; 169 | av_free(pkt1); 170 | ret = 1; 171 | break; 172 | } else if (!block) { 173 | ret = 0; 174 | break; 175 | } else { 176 | SDL_CondWait(q->cond, q->mutex); 177 | } 178 | } 179 | SDL_UnlockMutex(q->mutex); 180 | return ret; 181 | } 182 | static void packet_queue_flush(PacketQueue *q) { 183 | AVPacketList *pkt, *pkt1; 184 | 185 | SDL_LockMutex(q->mutex); 186 | for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) { 187 | pkt1 = pkt->next; 188 | av_free_packet(&pkt->pkt); 189 | av_freep(&pkt); 190 | } 191 | q->last_pkt = NULL; 192 | q->first_pkt = NULL; 193 | q->nb_packets = 0; 194 | q->size = 0; 195 | SDL_UnlockMutex(q->mutex); 196 | } 197 | double get_audio_clock(VideoState *vidState) { 198 | double pts; 199 | int hw_buf_size, bytes_per_sec, n; 200 | 201 | pts = vidState->audio_clock; /* maintained in the audio thread */ 202 
| hw_buf_size = vidState->audio_buf_size - vidState->audio_buf_index; 203 | bytes_per_sec = 0; 204 | n = vidState->audio_st->codec->channels * 2; 205 | if(vidState->audio_st) { 206 | bytes_per_sec = vidState->audio_st->codec->sample_rate * n; 207 | } 208 | if(bytes_per_sec) { 209 | pts -= (double)hw_buf_size / bytes_per_sec; 210 | } 211 | return pts; 212 | } 213 | double get_video_clock(VideoState *vidState) { 214 | double delta; 215 | 216 | delta = (av_gettime() - vidState->video_current_pts_time) / 1000000.0; 217 | return vidState->video_current_pts + delta; 218 | } 219 | double get_external_clock(VideoState *vidState) { 220 | return av_gettime() / 1000000.0; 221 | } 222 | double get_master_clock(VideoState *vidState) { 223 | if(vidState->av_sync_type == AV_SYNC_VIDEO_MASTER) { 224 | return get_video_clock(vidState); 225 | } else if(vidState->av_sync_type == AV_SYNC_AUDIO_MASTER) { 226 | return get_audio_clock(vidState); 227 | } else { 228 | return get_external_clock(vidState); 229 | } 230 | } 231 | /* Add or subtract samples to get a better sync, return new 232 | audio buffer size */ 233 | int synchronize_audio(VideoState *vidState, short *samples, 234 | int samples_size, double pts) { 235 | int n; 236 | double ref_clock; 237 | 238 | n = 2 * vidState->audio_st->codec->channels; 239 | 240 | if(vidState->av_sync_type != AV_SYNC_AUDIO_MASTER) { 241 | double diff, avg_diff; 242 | int wanted_size, min_size, max_size, nb_samples; 243 | 244 | ref_clock = get_master_clock(vidState); 245 | diff = get_audio_clock(vidState) - ref_clock; 246 | 247 | if(diff < AV_NOSYNC_THRESHOLD) { 248 | // accumulate the diffs 249 | vidState->audio_diff_cum = diff + vidState->audio_diff_avg_coef 250 | * vidState->audio_diff_cum; 251 | if(vidState->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { 252 | vidState->audio_diff_avg_count++; 253 | } else { 254 | avg_diff = vidState->audio_diff_cum * (1.0 - vidState->audio_diff_avg_coef); 255 | if(fabs(avg_diff) >= vidState->audio_diff_threshold) { 256 
| wanted_size = samples_size + ((int)(diff * vidState->audio_st->codec->sample_rate) * n); 257 | min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100); 258 | max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100); 259 | if(wanted_size < min_size) { 260 | wanted_size = min_size; 261 | } else if (wanted_size > max_size) { 262 | wanted_size = max_size; 263 | } 264 | if(wanted_size < samples_size) { 265 | /* remove samples */ 266 | samples_size = wanted_size; 267 | } else if(wanted_size > samples_size) { 268 | uint8_t *samples_end, *q; 269 | int nb; 270 | 271 | /* add samples by copying final sample*/ 272 | nb = (samples_size - wanted_size); 273 | samples_end = (uint8_t *)samples + samples_size - n; 274 | q = samples_end + n; 275 | while(nb > 0) { 276 | memcpy(q, samples_end, n); 277 | q += n; 278 | nb -= n; 279 | } 280 | samples_size = wanted_size; 281 | } 282 | } 283 | } 284 | } else { 285 | /* difference vidState TOO big; reset diff stuff */ 286 | vidState->audio_diff_avg_count = 0; 287 | vidState->audio_diff_cum = 0; 288 | } 289 | } 290 | return samples_size; 291 | } 292 | 293 | int audio_decode_frame(VideoState *vidState, uint8_t *audio_buf, int buf_size, double *pts_ptr) { 294 | 295 | int len1, data_size, n; 296 | AVPacket *pkt = &vidState->audio_pkt; 297 | static AVPacket pktTemp; 298 | double pts; 299 | static AVFrame *decoded_aframe; 300 | 301 | for(;;) { 302 | while(vidState->audio_pkt_size > 0) { 303 | int got_frame = 0; 304 | if (!decoded_aframe) { 305 | if (!(decoded_aframe = avcodec_alloc_frame())) { 306 | fprintf(stderr, "out of memory\n"); 307 | exit(1); 308 | } 309 | } else 310 | avcodec_get_frame_defaults(decoded_aframe); 311 | data_size = buf_size; 312 | len1 = avcodec_decode_audio4(vidState->audio_st->codec, decoded_aframe, &got_frame, &pktTemp); 313 | 314 | if(len1 < 0) { 315 | /* if error, skip frame */ 316 | vidState->audio_pkt_size = 0; 317 | break; 318 | } 319 | 320 | if (got_frame) { 321 | //printf("\nGot 
frame!"); 322 | //printf("\nFrame data size: %d", sizeof(decoded_aframe->data[0])); 323 | data_size = av_samples_get_buffer_size(NULL, vidState->audio_st->codec->channels, 324 | decoded_aframe->nb_samples, 325 | vidState->audio_st->codec->sample_fmt, 1); 326 | if (data_size > buf_size) { 327 | data_size = buf_size; 328 | } 329 | memcpy(audio_buf, decoded_aframe->data[0], data_size); 330 | 331 | }else{ 332 | data_size = 0; 333 | } 334 | pktTemp.data += len1; 335 | pktTemp.size -= len1; 336 | vidState->audio_pkt_data += len1; 337 | vidState->audio_pkt_size -= len1; 338 | if(data_size <= 0) { 339 | /* No data yet, get more frames */ 340 | continue; 341 | } 342 | pts = vidState->audio_clock; 343 | *pts_ptr = pts; 344 | n = 2 * vidState->audio_st->codec->channels; 345 | vidState->audio_clock += (double)data_size / 346 | (double)(n * vidState->audio_st->codec->sample_rate); 347 | 348 | /* We have data, return it and come back for more later */ 349 | return data_size; 350 | } 351 | if(pkt->data) 352 | av_free_packet(pkt); 353 | 354 | if(vidState->quit) { 355 | return -1; 356 | } 357 | /* next packet */ 358 | if(packet_queue_get(&vidState->audioq, pkt, 1) < 0) { 359 | return -1; 360 | } 361 | if(pkt->data == flush_pkt.data) { 362 | avcodec_flush_buffers(vidState->audio_st->codec); 363 | continue; 364 | } 365 | pktTemp.data = pkt->data; 366 | pktTemp.size = pkt->size; 367 | vidState->audio_pkt_data = pkt->data; 368 | vidState->audio_pkt_size = pkt->size; 369 | /* if update, update the audio clock w/pts */ 370 | if(pkt->pts != AV_NOPTS_VALUE) { 371 | vidState->audio_clock = av_q2d(vidState->audio_st->time_base)*pkt->pts; 372 | } 373 | } 374 | } 375 | 376 | void audio_callback(void *userdata, Uint8 *stream, int len) { 377 | 378 | VideoState *vidState = (VideoState *)userdata; 379 | int len1, audio_size; 380 | double pts; 381 | 382 | while(len > 0) { 383 | if(vidState->audio_buf_index >= vidState->audio_buf_size) { 384 | /* We have already sent all our data; get more */ 385 | 
audio_size = audio_decode_frame(vidState, vidState->audio_buf, sizeof(vidState->audio_buf), &pts); 386 | if(audio_size < 0) { 387 | /* If error, output silence */ 388 | vidState->audio_buf_size = 1024; 389 | memset(vidState->audio_buf, 0, vidState->audio_buf_size); 390 | } else { 391 | audio_size = synchronize_audio(vidState, (int16_t *)vidState->audio_buf, 392 | audio_size, pts); 393 | vidState->audio_buf_size = audio_size; 394 | } 395 | vidState->audio_buf_index = 0; 396 | } 397 | len1 = vidState->audio_buf_size - vidState->audio_buf_index; 398 | if(len1 > len) 399 | len1 = len; 400 | memcpy(stream, (uint8_t *)vidState->audio_buf + vidState->audio_buf_index, len1); 401 | len -= len1; 402 | stream += len1; 403 | vidState->audio_buf_index += len1; 404 | } 405 | } 406 | 407 | static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) { 408 | SDL_Event event; 409 | event.type = FF_REFRESH_EVENT; 410 | event.user.data1 = opaque; 411 | SDL_PushEvent(&event); 412 | return 0; /* 0 means stop timer */ 413 | } 414 | 415 | /* schedule a video refresh in 'delay' ms */ 416 | static void schedule_refresh(VideoState *vidState, int delay) { 417 | SDL_AddTimer(delay, sdl_refresh_timer_cb, vidState); 418 | } 419 | 420 | void video_display(VideoState *vidState) { 421 | 422 | SDL_Rect rect; 423 | VideoPicture *vp; 424 | AVPicture pict; 425 | float aspect_ratio; 426 | int w, h, x, y; 427 | int i; 428 | 429 | vp = &vidState->pictq[vidState->pictq_rindex]; 430 | if(vp->bmp) { 431 | if(vidState->video_st->codec->sample_aspect_ratio.num == 0) { 432 | aspect_ratio = 0; 433 | } else { 434 | aspect_ratio = av_q2d(vidState->video_st->codec->sample_aspect_ratio) * 435 | vidState->video_st->codec->width / vidState->video_st->codec->height; 436 | } 437 | if(aspect_ratio <= 0.0) { 438 | aspect_ratio = (float)vidState->video_st->codec->width / 439 | (float)vidState->video_st->codec->height; 440 | } 441 | h = screen->h; 442 | w = ((int)rint(h * aspect_ratio)) & -3; 443 | if(w > screen->w) { 
444 | w = screen->w; 445 | h = ((int)rint(w / aspect_ratio)) & -3; 446 | } 447 | x = (screen->w - w) / 2; 448 | y = (screen->h - h) / 2; 449 | 450 | rect.x = x; 451 | rect.y = y; 452 | rect.w = w; 453 | rect.h = h; 454 | SDL_DisplayYUVOverlay(vp->bmp, &rect); 455 | } 456 | } 457 | 458 | void video_refresh_timer(void *userdata) { 459 | 460 | VideoState *vidState = (VideoState *)userdata; 461 | VideoPicture *vp; 462 | double actual_delay, delay, sync_threshold, ref_clock, diff; 463 | 464 | if(vidState->video_st) { 465 | if(vidState->pictq_size == 0) { 466 | schedule_refresh(vidState, 1); 467 | } else { 468 | vp = &vidState->pictq[vidState->pictq_rindex]; 469 | 470 | vidState->video_current_pts = vp->pts; 471 | vidState->video_current_pts_time = av_gettime(); 472 | 473 | delay = vp->pts - vidState->frame_last_pts; /* the pts from last time */ 474 | if(delay <= 0 || delay >= 1.0) { 475 | /* if incorrect delay, use previous one */ 476 | delay = vidState->frame_last_delay; 477 | } 478 | /* save for next time */ 479 | vidState->frame_last_delay = delay; 480 | vidState->frame_last_pts = vp->pts; 481 | 482 | /* update delay to sync to audio if not master source */ 483 | if(vidState->av_sync_type != AV_SYNC_VIDEO_MASTER) { 484 | ref_clock = get_master_clock(vidState); 485 | diff = vp->pts - ref_clock; 486 | 487 | /* Skip or repeat the frame. Take delay into account 488 | FFPlay still doesn't "know if this vidState the best guess." */ 489 | sync_threshold = (delay > AV_SYNC_THRESHOLD) ? 
delay : AV_SYNC_THRESHOLD; 490 | if(fabs(diff) < AV_NOSYNC_THRESHOLD) { 491 | if(diff <= -sync_threshold) { 492 | delay = 0; 493 | } else if(diff >= sync_threshold) { 494 | delay = 2 * delay; 495 | } 496 | } 497 | } 498 | 499 | vidState->frame_timer += delay; 500 | /* computer the REAL delay */ 501 | actual_delay = vidState->frame_timer - (av_gettime() / 1000000.0); 502 | if(actual_delay < 0.010) { 503 | /* Really it should skip the picture instead */ 504 | actual_delay = 0.010; 505 | } 506 | schedule_refresh(vidState, (int)(actual_delay * 1000 + 0.5)); 507 | 508 | /* show the picture! */ 509 | video_display(vidState); 510 | 511 | /* update queue for next picture! */ 512 | if(++vidState->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { 513 | vidState->pictq_rindex = 0; 514 | } 515 | SDL_LockMutex(vidState->pictq_mutex); 516 | vidState->pictq_size--; 517 | SDL_CondSignal(vidState->pictq_cond); 518 | SDL_UnlockMutex(vidState->pictq_mutex); 519 | } 520 | } else { 521 | schedule_refresh(vidState, 100); 522 | } 523 | } 524 | 525 | void alloc_picture(void *userdata) { 526 | 527 | VideoState *vidState = (VideoState *)userdata; 528 | VideoPicture *vp; 529 | 530 | vp = &vidState->pictq[vidState->pictq_windex]; 531 | if(vp->bmp) { 532 | // we already have one make another, bigger/smaller 533 | SDL_FreeYUVOverlay(vp->bmp); 534 | } 535 | // Allocate a place to put our YUV image on that screen 536 | vp->bmp = SDL_CreateYUVOverlay(vidState->video_st->codec->width, 537 | vidState->video_st->codec->height, 538 | SDL_YV12_OVERLAY, 539 | screen); 540 | vp->width = vidState->video_st->codec->width; 541 | vp->height = vidState->video_st->codec->height; 542 | 543 | SDL_LockMutex(vidState->pictq_mutex); 544 | vp->allocated = 1; 545 | SDL_CondSignal(vidState->pictq_cond); 546 | SDL_UnlockMutex(vidState->pictq_mutex); 547 | 548 | } 549 | 550 | int queue_picture(VideoState *vidState, AVFrame *pFrame, double pts) { 551 | 552 | VideoPicture *vp; 553 | int dst_pix_fmt; 554 | AVPicture pict; 555 | 
556 | /* wait until we have space for a new pic */ 557 | SDL_LockMutex(vidState->pictq_mutex); 558 | while(vidState->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && 559 | !vidState->quit) { 560 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 561 | } 562 | SDL_UnlockMutex(vidState->pictq_mutex); 563 | 564 | if(vidState->quit) 565 | return -1; 566 | 567 | // windex vidState set to 0 initially 568 | vp = &vidState->pictq[vidState->pictq_windex]; 569 | 570 | /* allocate or resize the buffer! */ 571 | if(!vp->bmp || 572 | vp->width != vidState->video_st->codec->width || 573 | vp->height != vidState->video_st->codec->height) { 574 | SDL_Event event; 575 | 576 | vp->allocated = 0; 577 | /* we have to do it in the main thread */ 578 | event.type = FF_ALLOC_EVENT; 579 | event.user.data1 = vidState; 580 | SDL_PushEvent(&event); 581 | 582 | /* wait until we have a picture allocated */ 583 | SDL_LockMutex(vidState->pictq_mutex); 584 | while(!vp->allocated && !vidState->quit) { 585 | SDL_CondWait(vidState->pictq_cond, vidState->pictq_mutex); 586 | } 587 | SDL_UnlockMutex(vidState->pictq_mutex); 588 | if(vidState->quit) { 589 | return -1; 590 | } 591 | } 592 | /* We have a place to put our picture on the queue */ 593 | /* If we are skipping a frame, do we set this to null 594 | but still return vp->allocated = 1? 
*/ 595 | 596 | 597 | if(vp->bmp) { 598 | 599 | SDL_LockYUVOverlay(vp->bmp); 600 | 601 | dst_pix_fmt = PIX_FMT_YUV420P; 602 | /* point pict at the queue */ 603 | 604 | pict.data[0] = vp->bmp->pixels[0]; 605 | pict.data[1] = vp->bmp->pixels[2]; 606 | pict.data[2] = vp->bmp->pixels[1]; 607 | 608 | pict.linesize[0] = vp->bmp->pitches[0]; 609 | pict.linesize[1] = vp->bmp->pitches[2]; 610 | pict.linesize[2] = vp->bmp->pitches[1]; 611 | 612 | // Convert the image into YUV format that SDL uses 613 | static struct SwsContext *img_convert_ctx; 614 | int w = vidState->video_st->codec->width; 615 | int h = vidState->video_st->codec->height; 616 | if (!img_convert_ctx) 617 | img_convert_ctx = sws_getContext(w, h, vidState->video_st->codec->pix_fmt, 618 | w, h, dst_pix_fmt, 619 | SWS_X, NULL, NULL, NULL); 620 | 621 | sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, 622 | pFrame->linesize, 0, h, 623 | pict.data, pict.linesize); 624 | SDL_UnlockYUVOverlay(vp->bmp); 625 | vp->pts = pts; 626 | 627 | /* now we inform our display thread that we have a pic ready */ 628 | if(++vidState->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) { 629 | vidState->pictq_windex = 0; 630 | } 631 | SDL_LockMutex(vidState->pictq_mutex); 632 | vidState->pictq_size++; 633 | SDL_UnlockMutex(vidState->pictq_mutex); 634 | } 635 | return 0; 636 | } 637 | 638 | double synchronize_video(VideoState *vidState, AVFrame *src_frame, double pts) { 639 | 640 | double frame_delay; 641 | 642 | if(pts != 0) { 643 | /* if we have pts, set video clock to it */ 644 | vidState->video_clock = pts; 645 | } else { 646 | /* if we aren't given a pts, set it to the clock */ 647 | pts = vidState->video_clock; 648 | } 649 | /* update the video clock */ 650 | frame_delay = av_q2d(vidState->video_st->codec->time_base); 651 | /* if we are repeating a frame, adjust clock accordingly */ 652 | frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); 653 | vidState->video_clock += frame_delay; 654 | return pts; 655 | } 656 | 
657 | uint64_t global_video_pkt_pts = AV_NOPTS_VALUE; 658 | 659 | /* These are called whenever we allocate a frame 660 | * buffer. We use this to store the global_pts in 661 | * a frame at the time it vidState allocated. 662 | */ 663 | int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) { 664 | int ret = avcodec_default_get_buffer(c, pic); 665 | uint64_t *pts = av_malloc(sizeof(uint64_t)); 666 | *pts = global_video_pkt_pts; 667 | pic->opaque = pts; 668 | return ret; 669 | } 670 | void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) { 671 | if(pic) av_freep(&pic->opaque); 672 | avcodec_default_release_buffer(c, pic); 673 | } 674 | 675 | int video_thread(void *arg) { 676 | VideoState *vidState = (VideoState *)arg; 677 | AVPacket pkt1, *packet = &pkt1; 678 | int len1, frameFinished; 679 | AVFrame *pFrame; 680 | double pts; 681 | 682 | pFrame = avcodec_alloc_frame(); 683 | 684 | for(;;) { 685 | if(packet_queue_get(&vidState->videoq, packet, 1) < 0) { 686 | // means we quit getting packets 687 | break; 688 | } 689 | if(packet->data == flush_pkt.data) { 690 | avcodec_flush_buffers(vidState->video_st->codec); 691 | continue; 692 | } 693 | pts = 0; 694 | 695 | // Save global pts to be stored in pFrame in first call 696 | global_video_pkt_pts = packet->pts; 697 | // Decode video frame 698 | avcodec_decode_video2(vidState->video_st->codec, pFrame, &frameFinished, packet); 699 | if(packet->dts == AV_NOPTS_VALUE 700 | && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { 701 | pts = *(uint64_t *)pFrame->opaque; 702 | } else if(packet->dts != AV_NOPTS_VALUE) { 703 | pts = packet->dts; 704 | } else { 705 | pts = 0; 706 | } 707 | pts *= av_q2d(vidState->video_st->time_base); 708 | 709 | // Did we get a video frame? 
710 | if(frameFinished) { 711 | pts = synchronize_video(vidState, pFrame, pts); 712 | if(queue_picture(vidState, pFrame, pts) < 0) { 713 | break; 714 | } 715 | } 716 | av_free_packet(packet); 717 | } 718 | av_free(pFrame); 719 | return 0; 720 | } 721 | int stream_component_open(VideoState *vidState, int stream_index) { 722 | 723 | AVFormatContext *pFormatCtx = vidState->pFormatCtx; 724 | AVCodecContext *codecCtx; 725 | AVCodec *codec; 726 | SDL_AudioSpec wanted_spec, spec; 727 | 728 | if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) { 729 | return -1; 730 | } 731 | 732 | // Get a pointer to the codec context for the video stream 733 | codecCtx = pFormatCtx->streams[stream_index]->codec; 734 | 735 | if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) { 736 | // Set audio settings from codec info 737 | wanted_spec.freq = codecCtx->sample_rate; 738 | wanted_spec.format = AUDIO_S16SYS; 739 | wanted_spec.channels = codecCtx->channels; 740 | wanted_spec.silence = 0; 741 | wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; 742 | wanted_spec.callback = audio_callback; 743 | wanted_spec.userdata = vidState; 744 | 745 | if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { 746 | fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 747 | return -1; 748 | } 749 | vidState->audio_hw_buf_size = spec.size; 750 | } 751 | codec = avcodec_find_decoder(codecCtx->codec_id); 752 | if(!codec || (avcodec_open(codecCtx, codec) < 0)) { 753 | fprintf(stderr, "Unsupported codec!\n"); 754 | return -1; 755 | } 756 | 757 | switch(codecCtx->codec_type) { 758 | case AVMEDIA_TYPE_AUDIO: 759 | vidState->audioStream = stream_index; 760 | vidState->audio_st = pFormatCtx->streams[stream_index]; 761 | vidState->audio_buf_size = 0; 762 | vidState->audio_buf_index = 0; 763 | 764 | /* averaging filter for audio sync */ 765 | vidState->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB)); 766 | vidState->audio_diff_avg_count = 0; 767 | /* Correct audio only if larger error than this */ 768 | 
vidState->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate; 769 | 770 | memset(&vidState->audio_pkt, 0, sizeof(vidState->audio_pkt)); 771 | packet_queue_init(&vidState->audioq); 772 | SDL_PauseAudio(0); 773 | break; 774 | case AVMEDIA_TYPE_VIDEO: 775 | vidState->videoStream = stream_index; 776 | vidState->video_st = pFormatCtx->streams[stream_index]; 777 | 778 | vidState->frame_timer = (double)av_gettime() / 1000000.0; 779 | vidState->frame_last_delay = 40e-3; 780 | vidState->video_current_pts_time = av_gettime(); 781 | 782 | packet_queue_init(&vidState->videoq); 783 | vidState->video_tid = SDL_CreateThread(video_thread, vidState); 784 | codecCtx->get_buffer = our_get_buffer; 785 | codecCtx->release_buffer = our_release_buffer; 786 | 787 | break; 788 | default: 789 | break; 790 | } 791 | 792 | 793 | } 794 | 795 | int decode_interrupt_cb(void * ctx) { 796 | return (global_video_state && global_video_state->quit); 797 | } 798 | 799 | const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; 800 | 801 | int decode_thread(void *arg) { 802 | 803 | VideoState *vidState = (VideoState *)arg; 804 | AVFormatContext *pFormatCtx; 805 | AVPacket pkt1, *packet = &pkt1; 806 | 807 | int video_index = -1; 808 | int audio_index = -1; 809 | int i; 810 | 811 | vidState->videoStream=-1; 812 | vidState->audioStream=-1; 813 | 814 | global_video_state = vidState; 815 | 816 | pFormatCtx = avformat_alloc_context(); 817 | pFormatCtx->interrupt_callback = int_cb; 818 | 819 | if (avio_open2(&pFormatCtx->pb, vidState->filename, AVIO_FLAG_READ, &pFormatCtx->interrupt_callback, NULL)) 820 | return -1; 821 | 822 | // Open video file 823 | if (avformat_open_input(&pFormatCtx, vidState->filename, NULL, NULL)!=0) 824 | return -1; // Couldn't open file 825 | 826 | vidState->pFormatCtx = pFormatCtx; 827 | 828 | // Retrieve stream information 829 | if(avformat_find_stream_info(pFormatCtx, NULL)<0) 830 | return -1; // Couldn't find stream information 831 | 832 | // Dump 
information about file onto standard error 833 | av_dump_format(pFormatCtx, 0, vidState->filename, 0); 834 | 835 | // Find the first video stream 836 | for(i=0; inb_streams; i++) { 837 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && 838 | video_index < 0) { 839 | video_index=i; 840 | } 841 | if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && 842 | audio_index < 0) { 843 | audio_index=i; 844 | } 845 | } 846 | if(audio_index >= 0) { 847 | stream_component_open(vidState, audio_index); 848 | } 849 | if(video_index >= 0) { 850 | stream_component_open(vidState, video_index); 851 | } 852 | 853 | if(vidState->videoStream < 0 || vidState->audioStream < 0) { 854 | fprintf(stderr, "%s: could not open codecs\n", vidState->filename); 855 | goto fail; 856 | } 857 | 858 | // main decode loop 859 | 860 | for(;;) { 861 | if(vidState->quit) { 862 | break; 863 | } 864 | // seek stuff goes here 865 | if(vidState->seek_req) { 866 | int stream_index= -1; 867 | int64_t seek_target = vidState->seek_pos; 868 | 869 | if (vidState->videoStream >= 0) stream_index = vidState->videoStream; 870 | else if(vidState->audioStream >= 0) stream_index = vidState->audioStream; 871 | 872 | if(stream_index>=0){ 873 | seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base); 874 | } 875 | if(!av_seek_frame(vidState->pFormatCtx, stream_index, seek_target, vidState->seek_flags)) { 876 | fprintf(stderr, "%s: error while seeking\n", vidState->pFormatCtx->filename); 877 | } else { 878 | if(vidState->audioStream >= 0) { 879 | packet_queue_flush(&vidState->audioq); 880 | packet_queue_put(&vidState->audioq, &flush_pkt); 881 | } 882 | if(vidState->videoStream >= 0) { 883 | packet_queue_flush(&vidState->videoq); 884 | packet_queue_put(&vidState->videoq, &flush_pkt); 885 | } 886 | } 887 | vidState->seek_req = 0; 888 | } 889 | 890 | if(vidState->audioq.size > MAX_AUDIOQ_SIZE || 891 | vidState->videoq.size > MAX_VIDEOQ_SIZE) { 892 | 
SDL_Delay(10); 893 | continue; 894 | } 895 | if(av_read_frame(vidState->pFormatCtx, packet) < 0) { 896 | //if(url_ferror(&pFormatCtx->pb) == 0) { 897 | if(&pFormatCtx->pb && &pFormatCtx->pb->error){ 898 | SDL_Delay(100); /* no error; wait for user input */ 899 | continue; 900 | } else { 901 | break; 902 | } 903 | } 904 | // Is this a packet from the video stream? 905 | if(packet->stream_index == vidState->videoStream) { 906 | packet_queue_put(&vidState->videoq, packet); 907 | } else if(packet->stream_index == vidState->audioStream) { 908 | packet_queue_put(&vidState->audioq, packet); 909 | } else { 910 | av_free_packet(packet); 911 | } 912 | } 913 | /* all done - wait for it */ 914 | while(!vidState->quit) { 915 | SDL_Delay(100); 916 | } 917 | fail: 918 | { 919 | SDL_Event event; 920 | event.type = FF_QUIT_EVENT; 921 | event.user.data1 = vidState; 922 | SDL_PushEvent(&event); 923 | } 924 | return 0; 925 | } 926 | 927 | void stream_seek(VideoState *vidState, int64_t pos, int rel) { 928 | 929 | if(!vidState->seek_req) { 930 | vidState->seek_pos = pos; 931 | vidState->seek_flags = rel < 0 ? 
AVSEEK_FLAG_BACKWARD : 0; 932 | vidState->seek_req = 1; 933 | } 934 | } 935 | int main(int argc, char *argv[]) { 936 | 937 | SDL_Event event; 938 | double pts; 939 | VideoState *vidState; 940 | 941 | vidState = av_mallocz(sizeof(VideoState)); 942 | 943 | if(argc < 2) { 944 | fprintf(stderr, "Usage: test \n"); 945 | exit(1); 946 | } 947 | // Register all formats and codecs 948 | av_register_all(); 949 | 950 | if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { 951 | fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); 952 | exit(1); 953 | } 954 | 955 | // Make a screen to put our video 956 | #ifndef __DARWIN__ 957 | screen = SDL_SetVideoMode(640, 480, 0, 0); 958 | #else 959 | screen = SDL_SetVideoMode(640, 480, 24, 0); 960 | #endif 961 | if(!screen) { 962 | fprintf(stderr, "SDL: could not set video mode - exiting\n"); 963 | exit(1); 964 | } 965 | 966 | av_strlcpy(vidState->filename, argv[1], sizeof(vidState->filename)); 967 | 968 | vidState->pictq_mutex = SDL_CreateMutex(); 969 | vidState->pictq_cond = SDL_CreateCond(); 970 | 971 | schedule_refresh(vidState, 40); 972 | 973 | vidState->av_sync_type = DEFAULT_AV_SYNC_TYPE; 974 | vidState->parse_tid = SDL_CreateThread(decode_thread, vidState); 975 | if(!vidState->parse_tid) { 976 | av_free(vidState); 977 | return -1; 978 | } 979 | 980 | av_init_packet(&flush_pkt); 981 | flush_pkt.data = "FLUSH"; 982 | 983 | for(;;) { 984 | double incr, pos; 985 | SDL_WaitEvent(&event); 986 | switch(event.type) { 987 | case SDL_KEYDOWN: 988 | switch(event.key.keysym.sym) { 989 | case SDLK_LEFT: 990 | incr = -10.0; 991 | goto do_seek; 992 | case SDLK_RIGHT: 993 | incr = 10.0; 994 | goto do_seek; 995 | case SDLK_UP: 996 | incr = 60.0; 997 | goto do_seek; 998 | case SDLK_DOWN: 999 | incr = -60.0; 1000 | goto do_seek; 1001 | do_seek: 1002 | if(global_video_state) { 1003 | pos = get_master_clock(global_video_state); 1004 | pos += incr; 1005 | stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr); 
1006 | } 1007 | break; 1008 | default: 1009 | break; 1010 | } 1011 | break; 1012 | case FF_QUIT_EVENT: 1013 | case SDL_QUIT: 1014 | vidState->quit = 1; 1015 | SDL_Quit(); 1016 | exit(0); 1017 | break; 1018 | case FF_ALLOC_EVENT: 1019 | alloc_picture(event.user.data1); 1020 | break; 1021 | case FF_REFRESH_EVENT: 1022 | video_refresh_timer(event.user.data1); 1023 | break; 1024 | default: 1025 | break; 1026 | } 1027 | } 1028 | return 0; 1029 | } 1030 | --------------------------------------------------------------------------------