├── LICENSE ├── Makefile ├── README.md ├── RTSPFF.cpp ├── demoLive555withFFMPEG.sln └── demoLive555withFFMPEG.vcxproj /LICENSE: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuvalk/demoLive555withFFMPEG/f3bab455b8e11c3f4ecfd7c070b2e4ea526dd064/LICENSE -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | LIVE = /home/yuval/dev/live 2 | INCLUDES = -I$(LIVE)/UsageEnvironment/include -I$(LIVE)/groupsock/include -I$(LIVE)/liveMedia/include -I$(LIVE)/BasicUsageEnvironment/include 3 | # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. 4 | libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) 5 | libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) 6 | libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX) 7 | libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX) 8 | ##### Change the following for your environment: 9 | COMPILE_OPTS = $(INCLUDES) -I. -I/home/yuval/dev/ffmpeg -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -D__STDC_CONSTANT_MACROS 10 | C = c 11 | C_COMPILER = cc 12 | C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS) 13 | CPP = cpp 14 | CPLUSPLUS_COMPILER = c++ 15 | CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS) 16 | OBJ = o 17 | LINK = c++ -o 18 | LINK_OPTS = -L. $(LDFLAGS) 19 | CONSOLE_LINK_OPTS = $(LINK_OPTS) 20 | LIBRARY_LINK = ar cr 21 | LIBRARY_LINK_OPTS = 22 | LIB_SUFFIX = a 23 | LIBS_FOR_CONSOLE_APPLICATION = 24 | LIBS_FOR_GUI_APPLICATION = 25 | EXE = 26 | ##### End of variables to change 27 | 28 | 29 | PREFIX = /usr/local 30 | ALL = RTSPFF$(EXE) 31 | all: $(ALL) 32 | 33 | extra: testGSMStreamer$(EXE) 34 | 35 | .$(C).$(OBJ): 36 | $(C_COMPILER) -c $(C_FLAGS) $< 37 | .$(CPP).$(OBJ): 38 | $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< 39 | 40 | 41 | RTSP_FF_OBJS = RTSPFF.$(OBJ) 42 | 43 | openRTSP.$(CPP): playCommon.hh 44 | playCommon.$(CPP): playCommon.hh 45 | playSIP.$(CPP): playCommon.hh 46 | 47 | USAGE_ENVIRONMENT_DIR = $(LIVE)/UsageEnvironment 48 | USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX) 49 | BASIC_USAGE_ENVIRONMENT_DIR = $(LIVE)/BasicUsageEnvironment 50 | BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX) 51 | LIVEMEDIA_DIR = $(LIVE)/liveMedia 52 | LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX) 53 | GROUPSOCK_DIR = $(LIVE)/groupsock 54 | GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX) 55 | LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \ 56 | $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB) 57 | LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION) 58 | 59 | RTSPFF$(EXE): $(RTSP_FF_OBJS) $(LOCAL_LIBS) 60 | $(LINK)$@ $(CONSOLE_LINK_OPTS) $(RTSP_FF_OBJS) $(LIBS) /home/yuval/dev/ffmpeg/libavcodec/libavcodec.a /home/yuval/dev/ffmpeg/libavformat/libavformat.a /home/yuval/dev/ffmpeg/libavutil/libavutil.a /home/yuval/dev/ffmpeg/libavfilter/libavfilter.a /home/yuval/dev/ffmpeg/libavdevice/libavdevice.a /home/yuval/dev/ffmpeg/libswscale/libswscale.a /home/yuval/dev/ffmpeg/libswresample/libswresample.a -lm -lz -lbz2 -lpthread -lSDL 61 | 62 | clean: 63 | -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~ 64 | 65 | install: $(ALL) 66 | install -d $(DESTDIR)$(PREFIX)/bin 67 | install -m 755 $(ALL) $(DESTDIR)$(PREFIX)/bin 68 | 69 | 
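# (Added note, not part of the original Makefile: LIVE above, the FFmpeg include path in
# COMPILE_OPTS, and the static FFmpeg archives in the RTSPFF link rule are hard-coded for
# the author's machine and must be adapted. Since command-line variable assignments
# override the ones in this file, a build against your own live555 tree could look like:
#
#   make LIVE=/path/to/live
#
# SDL is linked with a bare -lSDL; if sdl-config is installed, its output could be used
# instead, e.g. LIBS_FOR_CONSOLE_APPLICATION = $(shell sdl-config --libs).)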
##### Any additional, platform-specific rules come here:
70 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | demoLive555withFFMPEG
2 | =====================
3 | Written by: Yuval Kashtan
4 | 2013-10-16
5 | 
6 | Demonstrate live555 RTSP with FFMPEG H264 support
7 | 
8 | Since I had many troubles finding a working example out there, I decided to create one.
9 | I added SDL support just so you can see it working, and used VLC to stream content to test it.
10 | 
11 | It is based on the test client supplied with Live555, on top of which I've added a modified version of Fabrice Bellard's libavcodec/api-example.c.
12 | 
13 | HOW TO COMPILE
14 | -----------------
15 | You'll have to modify the Makefile to fit your locations of ffmpeg and live555 (and SDL).
16 | 
17 | HOW TO COMPILE ON WINDOWS
18 | --------------------------
19 | You'll have to organize the prerequisites under ReferenceLib on your own (the names of the directories are all in the .vcxproj file);
20 | then you can compile with Visual Studio.
21 | 
22 | KNOWN LIMITATIONS:
23 | -------------------
24 | This is not a fully working client!
25 | Merely a quick demo.
26 | 
27 | - for now, you'll have to manually set the width and height in the code
28 | 
29 | 
--------------------------------------------------------------------------------
/RTSPFF.cpp:
--------------------------------------------------------------------------------
1 | /**********
2 | This library is free software; you can redistribute it and/or modify it under
3 | the terms of the GNU Lesser General Public License as published by the
4 | Free Software Foundation; either version 2.1 of the License, or (at your
5 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
6 | 
7 | This library is distributed in the hope that it will be useful, but WITHOUT
8 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
9 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
10 | more details.
11 | 
12 | You should have received a copy of the GNU Lesser General Public License
13 | along with this library; if not, write to the Free Software Foundation, Inc.,
14 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
15 | **********/
16 | // Copyright (c) 1996-2013, Live Networks, Inc. All rights reserved
17 | // A demo application, showing how to create and run a RTSP client (that can potentially receive multiple streams concurrently).
18 | //
19 | // NOTE: This code - although it builds a running application - is intended only to illustrate how to develop your own RTSP
20 | // client application. For a full-featured RTSP client application - with much more functionality, and many options - see
21 | // "openRTSP": http://www.live555.com/openRTSP/
22 | 
23 | /***********************************************************************************
24 | this version is modified and edited by Yuval Kashtan
25 | intended for the sole purpose of DEMONSTRATING how to integrate live555 with ffmpeg
26 | to decode a live H264 RTSP stream and display it using SDL
27 | ************************************************************************************/
28 | 
29 | #ifdef WIN32
30 | #include <windows.h>
31 | #endif
32 | 
33 | #include "liveMedia.hh"
34 | #include "BasicUsageEnvironment.hh"
35 | 
36 | // SDL
37 | #include <SDL.h>
38 | #include <SDL_thread.h>
39 | 
40 | // FFMPEG
41 | extern "C" {
42 | #include <stdlib.h>
43 | #include <stdio.h>
44 | #include <string.h>
45 | 
46 | #ifdef HAVE_AV_CONFIG_H
47 | #undef HAVE_AV_CONFIG_H
48 | #endif
49 | 
50 | #include "libavcodec/avcodec.h"
51 | #include "libavformat/avformat.h"
52 | #include "libavutil/mathematics.h"
53 | #include "libavutil/old_pix_fmts.h"
54 | #include "libswscale/swscale.h"
55 | 
56 | #define INBUF_SIZE 4096
57 | #define AUDIO_INBUF_SIZE 20480
58 | #define AUDIO_REFILL_THRESH 4096
59 | 
60 | }
61 | #include "Base64.hh"
62 | // Forward function definitions:
63 | 
64 | // RTSP 'response handlers':
65 | void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
66 | void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString);
67 | void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString);
68 | 
69 | // Other event handler functions:
70 | void subsessionAfterPlaying(void* clientData); // called when a stream's subsession (e.g., audio or video substream) ends
71 | void subsessionByeHandler(void* clientData); // called when a RTCP "BYE" is received for a subsession
72 | void streamTimerHandler(void* clientData);
73 | // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
74 | 
75 | // The main streaming routine (for each "rtsp://" URL):
76 | void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
77 | 
78 | // Used to iterate through each stream's 'subsessions', setting up each one:
79 | void setupNextSubsession(RTSPClient* rtspClient);
80 | 
81 | // Used to shut down and close a stream (including its "RTSPClient" object):
82 | void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
83 | 
84 | // A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish:
85 | UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
86 | return env << "[URL:\"" << rtspClient.url() << "\"]: ";
87 | }
88 | 
89 | // A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish:
90 | UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
91 | return env << subsession.mediumName() << "/" << subsession.codecName();
92 | }
93 | 
94 | void usage(UsageEnvironment& env, char const* progName) {
95 | env << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>\n";
\n"; 96 | env << "\t(where each is a \"rtsp://\" URL)\n"; 97 | } 98 | 99 | char eventLoopWatchVariable = 0; 100 | 101 | int main(int argc, char** argv) { 102 | // Begin by setting up our usage environment: 103 | TaskScheduler* scheduler = BasicTaskScheduler::createNew(); 104 | UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler); 105 | 106 | // We need at least one "rtsp://" URL argument: 107 | if (argc < 2) { 108 | usage(*env, argv[0]); 109 | return 1; 110 | } 111 | 112 | // avcodec init 113 | avcodec_register_all(); 114 | av_register_all(); 115 | 116 | // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start streaming each one: 117 | for (int i = 1; i <= argc-1; ++i) { 118 | openURL(*env, argv[0], argv[i]); 119 | } 120 | 121 | // All subsequent activity takes place within the event loop: 122 | env->taskScheduler().doEventLoop(&eventLoopWatchVariable); 123 | // This function call does not return, unless, at some point in time, "eventLoopWatchVariable" gets set to something non-zero. 124 | 125 | return 0; 126 | 127 | // If you choose to continue the application past this point (i.e., if you comment out the "return 0;" statement above), 128 | // and if you don't intend to do anything more with the "TaskScheduler" and "UsageEnvironment" objects, 129 | // then you can also reclaim the (small) memory used by these objects by uncommenting the following code: 130 | /* 131 | env->reclaim(); env = NULL; 132 | delete scheduler; scheduler = NULL; 133 | */ 134 | } 135 | 136 | // Define a class to hold per-stream state that we maintain throughout each stream's lifetime: 137 | 138 | class StreamClientState { 139 | public: 140 | StreamClientState(); 141 | virtual ~StreamClientState(); 142 | 143 | public: 144 | MediaSubsessionIterator* iter; 145 | MediaSession* session; 146 | MediaSubsession* subsession; 147 | TaskToken streamTimerTask; 148 | double duration; 149 | }; 150 | 151 | // If you're streaming just a single stream (i.e., just from a single URL, once), then you can define and use just a single 152 | // "StreamClientState" structure, as a global variable in your application. However, because - in this demo application - we're 153 | // showing how to play multiple streams, concurrently, we can't do that. Instead, we have to have a separate "StreamClientState" 154 | // structure for each "RTSPClient". To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass: 155 | 156 | class ourRTSPClient: public RTSPClient { 157 | public: 158 | static ourRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, 159 | int verbosityLevel = 0, 160 | char const* applicationName = NULL, 161 | portNumBits tunnelOverHTTPPortNum = 0); 162 | 163 | protected: 164 | ourRTSPClient(UsageEnvironment& env, char const* rtspURL, 165 | int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum); 166 | // called only by createNew(); 167 | virtual ~ourRTSPClient(); 168 | 169 | public: 170 | StreamClientState scs; 171 | }; 172 | 173 | // Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream'). 174 | // In practice, this might be a class (or a chain of classes) that decodes and then renders the incoming audio or video. 175 | // Or it might be a "FileSink", for outputting the received data into a file (as is done by the "openRTSP" application). 176 | // In this example code, however, we define a simple 'dummy' sink that receives incoming data, but does nothing with it. 
177 | 
178 | class DummySink: public MediaSink {
179 | public:
180 | static DummySink* createNew(UsageEnvironment& env,
181 | MediaSubsession& subsession, // identifies the kind of data that's being received
182 | char const* streamId = NULL); // identifies the stream itself (optional)
183 | 
184 | private:
185 | DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId);
186 | // called only by "createNew()"
187 | virtual ~DummySink();
188 | 
189 | static void afterGettingFrame(void* clientData, unsigned frameSize,
190 | unsigned numTruncatedBytes,
191 | struct timeval presentationTime,
192 | unsigned durationInMicroseconds);
193 | void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
194 | struct timeval presentationTime, unsigned durationInMicroseconds);
195 | 
196 | private:
197 | // redefined virtual functions:
198 | virtual Boolean continuePlaying();
199 | 
200 | private:
201 | u_int8_t* fReceiveBuffer;
202 | u_int8_t* fReceiveBufferAV;
203 | MediaSubsession& fSubsession;
204 | char* fStreamId;
205 | 
206 | private: //H264
207 | u_int8_t const* sps;
208 | unsigned spsSize;
209 | u_int8_t const* pps;
210 | unsigned ppsSize;
211 | public: void setSprop(u_int8_t const* prop, unsigned size);
212 | 
213 | private: //FFMPEG
214 | AVCodec *codec;
215 | AVCodecContext *c;
216 | int frame;
217 | int got_picture;
218 | int len;
219 | AVFrame *picture;
220 | uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
221 | char buf[1024];
222 | AVPacket avpkt;
223 | 
224 | private: //SDL
225 | SDL_Surface *screen;
226 | SDL_Overlay *bmp;
227 | SDL_Rect rect;
228 | };
229 | 
230 | #define RTSP_CLIENT_VERBOSITY_LEVEL 1 // by default, print verbose output from each "RTSPClient"
231 | 
232 | static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.
233 | 
234 | void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) {
235 | // Begin by creating a "RTSPClient" object. Note that there is a separate "RTSPClient" object for each stream that we wish
236 | // to receive (even if more than one stream uses the same "rtsp://" URL).
237 | RTSPClient* rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName);
238 | if (rtspClient == NULL) {
239 | env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
240 | return;
241 | }
242 | 
243 | ++rtspClientCount;
244 | 
245 | // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream.
246 | // Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response.
247 | // Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop: 248 | rtspClient->sendDescribeCommand(continueAfterDESCRIBE); 249 | } 250 | 251 | 252 | // Implementation of the RTSP 'response handlers': 253 | 254 | void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) { 255 | do { 256 | UsageEnvironment& env = rtspClient->envir(); // alias 257 | StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias 258 | 259 | if (resultCode != 0) { 260 | env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n"; 261 | delete[] resultString; 262 | break; 263 | } 264 | 265 | char* const sdpDescription = resultString; 266 | env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n"; 267 | 268 | // Create a media session object from this SDP description: 269 | scs.session = MediaSession::createNew(env, sdpDescription); 270 | delete[] sdpDescription; // because we don't need it anymore 271 | if (scs.session == NULL) { 272 | env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n"; 273 | break; 274 | } else if (!scs.session->hasSubsessions()) { 275 | env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; 276 | break; 277 | } 278 | 279 | // Then, create and set up our data source objects for the session. We do this by iterating over the session's 'subsessions', 280 | // calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one. 281 | // (Each 'subsession' will have its own data source.) 282 | scs.iter = new MediaSubsessionIterator(*scs.session); 283 | setupNextSubsession(rtspClient); 284 | return; 285 | } while (0); 286 | 287 | // An unrecoverable error occurred with this stream. 288 | shutdownStream(rtspClient); 289 | } 290 | 291 | // By default, we request that the server stream its data using RTP/UDP. 292 | // If, instead, you want to request that the server stream via RTP-over-TCP, change the following to True: 293 | #define REQUEST_STREAMING_OVER_TCP False 294 | 295 | void setupNextSubsession(RTSPClient* rtspClient) { 296 | UsageEnvironment& env = rtspClient->envir(); // alias 297 | StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias 298 | 299 | scs.subsession = scs.iter->next(); 300 | if (scs.subsession != NULL) { 301 | if (!scs.subsession->initiate()) { 302 | env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; 303 | setupNextSubsession(rtspClient); // give up on this subsession; go to the next one 304 | } else { 305 | env << *rtspClient << "Initiated the \"" << *scs.subsession 306 | << "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")\n"; 307 | 308 | // Continue setting up this subsession, by sending a RTSP "SETUP" command: 309 | rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP); 310 | } 311 | return; 312 | } 313 | 314 | // We've finished setting up all of the subsessions. 
Now, send a RTSP "PLAY" command to start the streaming:
315 | if (scs.session->absStartTime() != NULL) {
316 | // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
317 | rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
318 | } else {
319 | scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
320 | rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
321 | }
322 | }
323 | 
324 | void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
325 | do {
326 | UsageEnvironment& env = rtspClient->envir(); // alias
327 | StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
328 | 
329 | if (resultCode != 0) {
330 | env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
331 | break;
332 | }
333 | 
334 | env << *rtspClient << "Set up the \"" << *scs.subsession
335 | << "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")\n";
336 | 
337 | const char *sprop = scs.subsession->fmtp_spropparametersets(); // the SDP "sprop-parameter-sets" attribute, which carries the H264 SPS/PPS NAL units
338 | uint8_t const* sps = NULL;
339 | unsigned spsSize = 0;
340 | uint8_t const* pps = NULL;
341 | unsigned ppsSize = 0;
342 | 
343 | if (sprop != NULL) {
344 | unsigned numSPropRecords;
345 | SPropRecord* sPropRecords = parseSPropParameterSets(sprop, numSPropRecords);
346 | for (unsigned i = 0; i < numSPropRecords; ++i) {
347 | if (sPropRecords[i].sPropLength == 0) continue; // bad data
348 | u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
349 | if (nal_unit_type == 7/*SPS*/) {
350 | sps = sPropRecords[i].sPropBytes;
351 | spsSize = sPropRecords[i].sPropLength;
352 | } else if (nal_unit_type == 8/*PPS*/) {
353 | pps = sPropRecords[i].sPropBytes;
354 | ppsSize = sPropRecords[i].sPropLength;
355 | }
356 | }
357 | }
358 | 
359 | // Having successfully set up the subsession, create a data sink for it, and call "startPlaying()" on it.
360 | // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
361 | // after we've sent a RTSP "PLAY" command.)
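// (Added aside, not part of the original file: below, the parsed SPS/PPS are handed to the
// sink via setSprop(), which pushes each parameter set through the decoder. An alternative
// approach - handing them to FFMPEG as codec "extradata" before avcodec_open2() is called -
// might look roughly like this, assuming access to the sink's AVCodecContext "c":
//
//   unsigned extraSize = 4 + spsSize + 4 + ppsSize;
//   uint8_t* extra = (uint8_t*)av_malloc(extraSize + FF_INPUT_BUFFER_PADDING_SIZE);
//   uint8_t startCode[4] = { 0, 0, 0, 1 };
//   memcpy(extra, startCode, 4);
//   memcpy(extra + 4, sps, spsSize);
//   memcpy(extra + 4 + spsSize, startCode, 4);
//   memcpy(extra + 8 + spsSize, pps, ppsSize);
//   c->extradata = extra;
//   c->extradata_size = extraSize;
// )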
362 | scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url()); 363 | // perhaps use your own custom "MediaSink" subclass instead 364 | if (scs.subsession->sink == NULL) { 365 | env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession 366 | << "\" subsession: " << env.getResultMsg() << "\n"; 367 | break; 368 | } 369 | 370 | env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n"; 371 | scs.subsession->miscPtr = rtspClient; // a hack to let subsession handle functions get the "RTSPClient" from the subsession 372 | if (sps != NULL) { 373 | ((DummySink *)scs.subsession->sink)->setSprop(sps, spsSize); 374 | } 375 | if (pps != NULL) { 376 | ((DummySink *)scs.subsession->sink)->setSprop(pps, ppsSize); 377 | } 378 | scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), 379 | subsessionAfterPlaying, scs.subsession); 380 | // Also set a handler to be called if a RTCP "BYE" arrives for this subsession: 381 | if (scs.subsession->rtcpInstance() != NULL) { 382 | scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession); 383 | } 384 | } while (0); 385 | delete[] resultString; 386 | 387 | // Set up the next subsession, if any: 388 | setupNextSubsession(rtspClient); 389 | } 390 | 391 | void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) { 392 | Boolean success = False; 393 | 394 | do { 395 | UsageEnvironment& env = rtspClient->envir(); // alias 396 | StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias 397 | 398 | if (resultCode != 0) { 399 | env << *rtspClient << "Failed to start playing session: " << resultString << "\n"; 400 | break; 401 | } 402 | 403 | // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end 404 | // using a RTCP "BYE"). This is optional. If, instead, you want to keep the stream active - e.g., so you can later 405 | // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code. 406 | // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.) 407 | if (scs.duration > 0) { 408 | unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration. (This is optional.) 409 | scs.duration += delaySlop; 410 | unsigned uSecsToDelay = (unsigned)(scs.duration*1000000); 411 | scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient); 412 | } 413 | 414 | env << *rtspClient << "Started playing session"; 415 | if (scs.duration > 0) { 416 | env << " (for up to " << scs.duration << " seconds)"; 417 | } 418 | env << "...\n"; 419 | 420 | success = True; 421 | } while (0); 422 | delete[] resultString; 423 | 424 | if (!success) { 425 | // An unrecoverable error occurred with this stream. 
426 | shutdownStream(rtspClient); 427 | } 428 | } 429 | 430 | 431 | // Implementation of the other event handlers: 432 | 433 | void subsessionAfterPlaying(void* clientData) { 434 | MediaSubsession* subsession = (MediaSubsession*)clientData; 435 | RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); 436 | 437 | // Begin by closing this subsession's stream: 438 | Medium::close(subsession->sink); 439 | subsession->sink = NULL; 440 | 441 | // Next, check whether *all* subsessions' streams have now been closed: 442 | MediaSession& session = subsession->parentSession(); 443 | MediaSubsessionIterator iter(session); 444 | while ((subsession = iter.next()) != NULL) { 445 | if (subsession->sink != NULL) return; // this subsession is still active 446 | } 447 | 448 | // All subsessions' streams have now been closed, so shutdown the client: 449 | shutdownStream(rtspClient); 450 | } 451 | 452 | void subsessionByeHandler(void* clientData) { 453 | MediaSubsession* subsession = (MediaSubsession*)clientData; 454 | RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr; 455 | UsageEnvironment& env = rtspClient->envir(); // alias 456 | 457 | env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n"; 458 | 459 | // Now act as if the subsession had closed: 460 | subsessionAfterPlaying(subsession); 461 | } 462 | 463 | void streamTimerHandler(void* clientData) { 464 | ourRTSPClient* rtspClient = (ourRTSPClient*)clientData; 465 | StreamClientState& scs = rtspClient->scs; // alias 466 | 467 | scs.streamTimerTask = NULL; 468 | 469 | // Shut down the stream: 470 | shutdownStream(rtspClient); 471 | } 472 | 473 | void shutdownStream(RTSPClient* rtspClient, int exitCode) { 474 | UsageEnvironment& env = rtspClient->envir(); // alias 475 | StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias 476 | 477 | // First, check whether any subsessions have still to be closed: 478 | if (scs.session != NULL) { 479 | Boolean someSubsessionsWereActive = False; 480 | MediaSubsessionIterator iter(*scs.session); 481 | MediaSubsession* subsession; 482 | 483 | while ((subsession = iter.next()) != NULL) { 484 | if (subsession->sink != NULL) { 485 | Medium::close(subsession->sink); 486 | subsession->sink = NULL; 487 | 488 | if (subsession->rtcpInstance() != NULL) { 489 | subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN" 490 | } 491 | 492 | someSubsessionsWereActive = True; 493 | } 494 | } 495 | 496 | if (someSubsessionsWereActive) { 497 | // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream. 498 | // Don't bother handling the response to the "TEARDOWN". 499 | rtspClient->sendTeardownCommand(*scs.session, NULL); 500 | } 501 | } 502 | 503 | env << *rtspClient << "Closing the stream.\n"; 504 | Medium::close(rtspClient); 505 | // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed. 506 | 507 | if (--rtspClientCount == 0) { 508 | // The final stream has ended, so exit the application now. 509 | // (Of course, if you're embedding this code into your own application, you might want to comment this out, 510 | // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".) 
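// (Added sketch, not part of the original file, of the alternative described in the comment
// above - leaving the LIVE555 event loop instead of exiting the process:
//
//   eventLoopWatchVariable = 1; // makes doEventLoop() in main() return
//   return;                     // ...so "main()" continues after the event loop
// )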
511 | exit(exitCode);
512 | }
513 | }
514 | 
515 | 
516 | // Implementation of "ourRTSPClient":
517 | 
518 | ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, char const* rtspURL,
519 | int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum) {
520 | return new ourRTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum);
521 | }
522 | 
523 | ourRTSPClient::ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
524 | int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum)
525 | : RTSPClient(env,rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) {
526 | }
527 | 
528 | ourRTSPClient::~ourRTSPClient() {
529 | }
530 | 
531 | 
532 | // Implementation of "StreamClientState":
533 | 
534 | StreamClientState::StreamClientState()
535 | : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
536 | }
537 | 
538 | StreamClientState::~StreamClientState() {
539 | delete iter;
540 | if (session != NULL) {
541 | // We also need to delete "session", and unschedule "streamTimerTask" (if set)
542 | UsageEnvironment& env = session->envir(); // alias
543 | 
544 | env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
545 | Medium::close(session);
546 | }
547 | }
548 | 
549 | 
550 | // Implementation of "DummySink":
551 | 
552 | // Unlike the original "DummySink", this one decodes and displays the data it receives; it still needs a receive buffer.
553 | // Define the size of the buffer that we'll use:
554 | #define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000
555 | 
556 | DummySink* DummySink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) {
557 | return new DummySink(env, subsession, streamId);
558 | }
559 | 
560 | DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
561 | : MediaSink(env),
562 | fSubsession(subsession) {
563 | fStreamId = strDup(streamId);
564 | fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
565 | fReceiveBufferAV = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE+4];
566 | fReceiveBufferAV[0] = 0; // pre-load an Annex-B start code (00 00 00 01);
567 | fReceiveBufferAV[1] = 0; // each received NAL unit is later copied in
568 | fReceiveBufferAV[2] = 0; // right after these four bytes, which is the
569 | fReceiveBufferAV[3] = 1; // framing the FFMPEG H264 decoder expects
570 | 
571 | 
572 | av_init_packet(&avpkt);
573 | avpkt.flags |= AV_PKT_FLAG_KEY;
574 | avpkt.pts = avpkt.dts = 0;
575 | 
576 | /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
577 | memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
578 | 
579 | //codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
580 | codec = avcodec_find_decoder(CODEC_ID_H264);
581 | if (!codec) {
582 | envir() << "codec not found!";
583 | exit(4);
584 | }
585 | 
586 | c = avcodec_alloc_context3(codec);
587 | picture = avcodec_alloc_frame();
588 | 
589 | if (codec->capabilities & CODEC_CAP_TRUNCATED) {
590 | c->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames
591 | }
592 | 
593 | c->width = 640; // the "known limitation" from the README: the frame size is hard-coded
594 | c->height = 360; // here, and must be set manually to match the incoming stream
595 | c->pix_fmt = PIX_FMT_YUV420P;
596 | 
597 | /* for some codecs width and height MUST be initialized here because this info is not available in the bitstream */
598 | 
599 | if (avcodec_open2(c,codec,NULL) < 0) {
600 | envir() << "could not open codec";
601 | exit(5);
602 | }
603 | 
604 | 
605 | //SDL init
606 | if (
607 | SDL_Init(
608 | SDL_INIT_VIDEO |
609 | SDL_INIT_AUDIO |
610 | SDL_INIT_TIMER
611 | )
612 | ) {
613 | envir() << "Could not initialize SDL - " << SDL_GetError() << "\n";
614 | exit (10);
615 | }
616 | 
617 | 
618 | screen = SDL_SetVideoMode(
619 | c->width,
620 | c->height,
621 | 24,
622 | 0
623 | );
624 | if (!screen) {
625 | envir() << "SDL: could not set video mode - exiting\n";
626 | exit(11);
627 | }
628 | 
629 | bmp = SDL_CreateYUVOverlay(
630 | c->width,
631 | c->height,
632 | SDL_YV12_OVERLAY,
633 | screen
634 | );
635 | }
636 | 
637 | DummySink::~DummySink() {
638 | delete[] fReceiveBuffer; delete[] fReceiveBufferAV; // (the second delete was missing in the original, leaking the start-code buffer)
639 | delete[] fStreamId;
640 | }
641 | 
642 | void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
643 | struct timeval presentationTime, unsigned durationInMicroseconds) {
644 | DummySink* sink = (DummySink*)clientData;
645 | sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
646 | }
647 | 
648 | // If you want to see debugging output for each received frame, then uncomment the following line:
649 | //#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
650 | /*
651 | void DummySink::r2sprop2() {
652 | avpkt.data[0] = 0;
653 | avpkt.data[1] = 0;
654 | avpkt.data[2] = 0;
655 | avpkt.data[3] = 1;
656 | avpkt.data[4] = 0x68;
657 | avpkt.data[5] = 0xeb;
658 | avpkt.data[6] = 0xe3;
659 | avpkt.data[7] = 0xcb;
660 | avpkt.data[8] = 0x22;
661 | avpkt.data[9] = 0xc0;
662 | avpkt.size=10;
663 | len = avcodec_decode_video2 (c, picture, &got_picture, &avpkt);
664 | if (len < 0) {
665 | // envir() << "Error while decoding frame" << frame;
666 | // exit(6);
667 | }
668 | }
669 | void DummySink::r2sprop() {
670 | avpkt.data[0] = 0;
671 | avpkt.data[1] = 0;
672 | avpkt.data[2] = 0;
673 | avpkt.data[3] = 1;
674 | avpkt.data[4] = 0x67;
675 | avpkt.data[5] = 0x64;
676 | avpkt.data[6] = 0x00;
677 | avpkt.data[7] = 0x1e;
678 | avpkt.data[8] = 0xac;
679 | avpkt.data[9] = 0xd9;
680 | avpkt.data[10] = 0x40;
681 | avpkt.data[11] = 0xa0;
682 | avpkt.data[12] = 0x2f;
683 | avpkt.data[13] = 0xf9;
684 | avpkt.data[14] = 0x70;
685 | avpkt.data[15] = 0x11;
686 | avpkt.data[16] = 0x00;
687 | avpkt.data[17] = 0x00;
688 | avpkt.data[18] = 0x03;
689 | avpkt.data[19] = 0x03;
690 | avpkt.data[20] = 0xe8;
691 | avpkt.data[21] = 0x00;
692 | avpkt.data[22] = 0x00;
693 | avpkt.data[23] = 0xe9;
694 | avpkt.data[24] = 0xba;
695 | avpkt.data[25] = 0x8f;
696 | avpkt.data[26] = 0x16;
697 | avpkt.data[27] = 0x2d;
698 | avpkt.data[28] = 0x96;
699 | avpkt.size=29;
700 | len = avcodec_decode_video2 (c, picture, &got_picture, &avpkt);
701 | if (len < 0) {
702 | // envir() << "Error while decoding frame" << frame;
703 | // exit(6);
704 | }
705 | }
706 | */
707 | void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, char *filename) {
708 | FILE *fp;
709 | int i;
710 | 
711 | fp = fopen (filename,"wb");
712 | fprintf(fp,"P5\n%d %d\n%d\n",xsize,ysize,255);
713 | for (i = 0; i < ysize; i++) {
714 | fwrite (buf + i * wrap, 1, xsize, fp);
715 | }
716 | fclose (fp);
717 | }
718 | 
719 | // (The two function bodies below were damaged in this copy of the file and have been reconstructed.
720 | //  setSprop() is assumed to follow the pattern of the commented-out r2sprop() above: it prepends the
721 | //  Annex-B start code already sitting in fReceiveBufferAV[0..3] and runs the SPS/PPS parameter set
722 | //  through the decoder once. The opening lines of afterGettingFrame() are restored from the LIVE555
723 | //  "testRTSPClient" demo that this file is based on.)
724 | void DummySink::setSprop(u_int8_t const* prop, unsigned size) {
725 | memcpy (fReceiveBufferAV + 4, prop, size);
726 | avpkt.data = fReceiveBufferAV;
727 | avpkt.size = size + 4;
728 | len = avcodec_decode_video2 (c, picture, &got_picture, &avpkt);
729 | if (len < 0) {
730 | envir() << "Error while decoding parameter set\n";
731 | }
732 | }
733 | 
734 | void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
735 | struct timeval presentationTime, unsigned durationInMicroseconds) {
736 | // We've just received a frame of data. (Optionally) print out information about it:
737 | #ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
738 | if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
739 | envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
740 | if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
749 | char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
750 | sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
751 | envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
752 | if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
753 | envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
754 | }
755 | #ifdef DEBUG_PRINT_NPT
756 | envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
757 | #endif
758 | envir() << "\n";
759 | #endif
760 | if (strcmp(fSubsession.codecName(),"H264") == 0) {
761 | avpkt.data = fReceiveBufferAV;
762 | // r2sprop();
763 | // r2sprop2();
764 | // avpkt.size = (int)fReceiveBuffer[0];
765 | avpkt.size = frameSize + 4; // the received NAL unit plus the 4-byte start code
766 | // avpkt.size = frameSize;
767 | if (avpkt.size != 0) {
768 | memcpy (fReceiveBufferAV + 4, fReceiveBuffer, frameSize);
769 | avpkt.data = fReceiveBufferAV; //+2;
770 | // avpkt.data = fReceiveBuffer; //+2;
771 | len = avcodec_decode_video2 (c, picture, &got_picture, &avpkt);
772 | if (len < 0) {
773 | envir() << "Error while decoding frame " << frame;
774 | // exit(6);
775 | }
776 | if (got_picture) {
777 | // display the decoded frame:
778 | SDL_LockYUVOverlay(bmp);
779 | 
780 | AVPicture pict;
781 | pict.data[0] = bmp->pixels[0]; // note the U/V swap below: SDL's YV12 overlay
782 | pict.data[1] = bmp->pixels[2]; // stores its planes in Y,V,U order, whereas
783 | pict.data[2] = bmp->pixels[1]; // FFMPEG's YUV420P planes are in Y,U,V order
784 | 
785 | pict.linesize[0] = bmp->pitches[0];
786 | pict.linesize[1] = bmp->pitches[2];
787 | pict.linesize[2] = bmp->pitches[1];
788 | 
789 | struct SwsContext *sws; // source and destination formats are identical, so this is effectively a stride-aware copy into the overlay
790 | sws = sws_getContext(
791 | c->width,
792 | c->height,
793 | PIX_FMT_YUV420P,
794 | c->width,
795 | c->height,
796 | PIX_FMT_YUV420P,
797 | SWS_BICUBIC,
798 | NULL,
799 | NULL,
800 | NULL
801 | );
802 | sws_scale(
803 | sws,
804 | picture->data,
805 | picture->linesize,
806 | 0,
807 | c->height,
808 | pict.data,
809 | pict.linesize
810 | );
811 | 
812 | 
813 | 
814 | SDL_UnlockYUVOverlay(bmp);
815 | 
816 | rect.x = 0;
817 | rect.y = 0;
818 | rect.w = c->width;
819 | rect.h = c->height;
820 | SDL_DisplayYUVOverlay(bmp, &rect);
821 | 
822 | 
823 | /*
824 | char fname[256]={0};
825 | sprintf(fname, "OriginalYUV%d.pgm",frame);
826 | pgm_save (
827 | picture->data[0],
828 | picture->linesize[0],
829 | c->width,
830 | c->height,
831 | fname
832 | );
833 | */
834 | sws_freeContext(sws);
835 | frame++;
836 | } else {
837 | envir() << "no picture :( !\n";
838 | }
839 | }
840 | 
841 | }
842 | 
843 | // Then continue, to request the next frame of data:
844 | continuePlaying();
845 | }
846 | 
847 | Boolean DummySink::continuePlaying() {
848 | if (fSource == NULL) return False; // sanity check (should not happen)
849 | 
850 | // Request the next frame of data from our input source.
"afterGettingFrame()" will get called later, when it arrives: 851 | fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE, 852 | afterGettingFrame, this, 853 | onSourceClosure, this); 854 | return True; 855 | } 856 | -------------------------------------------------------------------------------- /demoLive555withFFMPEG.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 2012 4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "demoLive555withFFMPEG", "demoLive555withFFMPEG.vcxproj", "{A65346B9-01C7-4594-85DD-F8BC98CBEF60}" 5 | EndProject 6 | Global 7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 8 | Debug|Win32 = Debug|Win32 9 | Release|Win32 = Release|Win32 10 | EndGlobalSection 11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 12 | {A65346B9-01C7-4594-85DD-F8BC98CBEF60}.Debug|Win32.ActiveCfg = Debug|Win32 13 | {A65346B9-01C7-4594-85DD-F8BC98CBEF60}.Debug|Win32.Build.0 = Debug|Win32 14 | {A65346B9-01C7-4594-85DD-F8BC98CBEF60}.Release|Win32.ActiveCfg = Release|Win32 15 | {A65346B9-01C7-4594-85DD-F8BC98CBEF60}.Release|Win32.Build.0 = Release|Win32 16 | EndGlobalSection 17 | GlobalSection(SolutionProperties) = preSolution 18 | HideSolutionNode = FALSE 19 | EndGlobalSection 20 | EndGlobal 21 | -------------------------------------------------------------------------------- /demoLive555withFFMPEG.vcxproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | Win32 7 | 8 | 9 | Release 10 | Win32 11 | 12 | 13 | 14 | {A65346B9-01C7-4594-85DD-F8BC98CBEF60} 15 | Win32Proj 16 | demoLive555withFFMPEG 17 | 18 | 19 | 20 | Application 21 | true 22 | v110 23 | Unicode 24 | 25 | 26 | Application 27 | false 28 | v110 29 | true 30 | Unicode 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | true 44 | 45 | 46 | false 47 | 48 | 49 | 50 | 51 | 52 | Level3 53 | Disabled 54 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) 55 | ReferenceLib\live555_20130816\BasicUsageEnvironment\include;ReferenceLib\live555_20130816\groupsock\include;ReferenceLib\live555_20130816\liveMedia\include;ReferenceLib\live555_20130816\UsageEnvironment\include;ReferenceLib\SDL-1.2.15\include;ReferenceLib\ffmpeg-2.0.1-win32-dev\include 56 | 57 | 58 | Console 59 | true 60 | ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;ReferenceLib\live555_20130816\libs\libBasicUsageEnvironment.lib;ReferenceLib\live555_20130816\libs\libgroupsock.lib;ReferenceLib\live555_20130816\libs\libliveMedia.lib;ReferenceLib\live555_20130816\libs\libUsageEnvironment.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\avcodec.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\avformat.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\avutil.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\avfilter.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\avdevice.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\swscale.lib;ReferenceLib\ffmpeg-2.0.1-win32-dev\lib\swresample.lib;ReferenceLib\SDL-1.2.15\lib\x86\SDL.lib;ReferenceLib\SDL-1.2.15\lib\x86\SDLmain.lib;%(AdditionalDependencies) 61 | 62 | 63 | 64 | 65 | Level3 66 | 67 | 68 | MaxSpeed 69 | true 70 | true 71 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) 72 | 73 | 74 | Console 75 | true 76 | true 77 | true 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 
--------------------------------------------------------------------------------