├── .github ├── ISSUE_TEMPLATE │ ├── 1_broken_site.md │ ├── 2_site_support_request.md │ ├── 3_site_feature_request.md │ ├── 4_bug_report.md │ ├── 5_feature_request.md │ └── 6_question.md ├── ISSUE_TEMPLATE_tmpl │ ├── 1_broken_site.md │ ├── 2_site_support_request.md │ ├── 3_site_feature_request.md │ ├── 4_bug_report.md │ └── 5_feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── build.yml │ └── python-publish.yml.disable ├── .gitignore ├── .travis.yml ├── AUTHORS ├── CONTRIBUTING.md ├── ChangeLog ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── devscripts ├── SizeOfImage.patch ├── SizeOfImage_w.patch ├── bash-completion.in ├── bash-completion.py ├── buildserver.py ├── check-porn.py ├── create-github-release.py ├── fish-completion.in ├── fish-completion.py ├── generate_aes_testdata.py ├── gh-pages │ ├── add-version.py │ ├── generate-download.py │ ├── sign-versions.py │ ├── update-copyright.py │ ├── update-feed.py │ └── update-sites.py ├── install_jython.sh ├── lazy_load_template.py ├── make_contributing.py ├── make_issue_template.py ├── make_lazy_extractors.py ├── make_readme.py ├── make_supportedsites.py ├── posix-locale.sh ├── prepare_manpage.py ├── release.sh ├── run_tests.sh ├── show-downloads-statistics.py ├── wine-py2exe.sh ├── zsh-completion.in └── zsh-completion.py ├── docs ├── .gitignore ├── Makefile ├── conf.py ├── faq.md ├── index.rst ├── module_guide.rst └── supportedsites.md ├── make_win.bat ├── pyinst.py ├── pyinst32.py ├── scripts ├── update-version-workflow.py └── update-version.py ├── setup.cfg ├── setup.py ├── test ├── __init__.py ├── helper.py ├── parameters.json ├── swftests │ ├── .gitignore │ ├── ArrayAccess.as │ ├── ClassCall.as │ ├── ClassConstruction.as │ ├── ConstArrayAccess.as │ ├── ConstantInt.as │ ├── DictCall.as │ ├── EqualsOperator.as │ ├── LocalVars.as │ ├── MemberAssignment.as │ ├── NeOperator.as │ ├── PrivateCall.as │ ├── PrivateVoidCall.as │ ├── StaticAssignment.as │ ├── StaticRetrieval.as │ ├── StringBasics.as │ ├── StringCharCodeAt.as │ └── StringConversion.as ├── test_InfoExtractor.py ├── test_YoutubeDL.py ├── test_YoutubeDLCookieJar.py ├── test_aes.py ├── test_age_restriction.py ├── test_all_urls.py ├── test_cache.py ├── test_compat.py ├── test_download.py ├── test_downloader_http.py ├── test_execution.py ├── test_http.py ├── test_iqiyi_sdk_interpreter.py ├── test_jsinterp.py ├── test_netrc.py ├── test_options.py ├── test_postprocessors.py ├── test_socks.py ├── test_subtitles.py ├── test_swfinterp.py ├── test_unicode_literals.py ├── test_update.py ├── test_utils.py ├── test_verbose_output.py ├── test_write_annotations.py ├── test_youtube_chapters.py ├── test_youtube_lists.py ├── test_youtube_signature.py ├── testcert.pem ├── testdata │ ├── cookies │ │ ├── httponly_cookies.txt │ │ ├── malformed_cookies.txt │ │ └── session_cookies.txt │ ├── f4m │ │ └── custom_base_url.f4m │ ├── m3u8 │ │ ├── pluzz_francetv_11507.m3u8 │ │ ├── teamcoco_11995.m3u8 │ │ ├── ted_18923.m3u8 │ │ ├── toggle_mobile_12211.m3u8 │ │ ├── twitch_vod.m3u8 │ │ └── vidio.m3u8 │ ├── mpd │ │ ├── float_duration.mpd │ │ ├── unfragmented.mpd │ │ └── urls_only.mpd │ └── xspf │ │ └── foo_xspf.xspf └── versions.json ├── tox.ini ├── win ├── icon │ └── cloud.ico └── ver.txt └── youtube_dlc ├── YoutubeDL.py ├── __init__.py ├── __main__.py ├── aes.py ├── cache.py ├── compat.py ├── downloader ├── __init__.py ├── common.py ├── dash.py ├── external.py ├── f4m.py ├── fragment.py ├── hls.py ├── http.py ├── ism.py ├── rtmp.py ├── rtsp.py └── youtube_live_chat.py ├── extractor ├── 
__init__.py ├── abc.py ├── abcnews.py ├── abcotvs.py ├── academicearth.py ├── acast.py ├── adn.py ├── adobeconnect.py ├── adobepass.py ├── adobetv.py ├── adultswim.py ├── aenetworks.py ├── afreecatv.py ├── airmozilla.py ├── aliexpress.py ├── aljazeera.py ├── allocine.py ├── alphaporno.py ├── alura.py ├── amara.py ├── amcnetworks.py ├── americastestkitchen.py ├── amp.py ├── animeondemand.py ├── anvato.py ├── aol.py ├── apa.py ├── aparat.py ├── appleconnect.py ├── appletrailers.py ├── archiveorg.py ├── ard.py ├── arkena.py ├── arte.py ├── asiancrush.py ├── atresplayer.py ├── atttechchannel.py ├── atvat.py ├── audimedia.py ├── audioboom.py ├── audiomack.py ├── awaan.py ├── aws.py ├── azmedien.py ├── baidu.py ├── bandcamp.py ├── bbc.py ├── beampro.py ├── beatport.py ├── beeg.py ├── behindkink.py ├── bellmedia.py ├── bet.py ├── bfi.py ├── bigflix.py ├── bild.py ├── bilibili.py ├── biobiochiletv.py ├── biqle.py ├── bitchute.py ├── bitwave.py ├── bleacherreport.py ├── blinkx.py ├── bloomberg.py ├── bokecc.py ├── bostonglobe.py ├── box.py ├── bpb.py ├── br.py ├── bravotv.py ├── breakcom.py ├── brightcove.py ├── businessinsider.py ├── buzzfeed.py ├── byutv.py ├── c56.py ├── camdemy.py ├── cammodels.py ├── camtube.py ├── camwithher.py ├── canalc2.py ├── canalplus.py ├── canvas.py ├── carambatv.py ├── cartoonnetwork.py ├── cbc.py ├── cbs.py ├── cbsinteractive.py ├── cbslocal.py ├── cbsnews.py ├── cbssports.py ├── ccc.py ├── ccma.py ├── cctv.py ├── cda.py ├── ceskatelevize.py ├── channel9.py ├── charlierose.py ├── chaturbate.py ├── chilloutzone.py ├── chirbit.py ├── cinchcast.py ├── cinemax.py ├── ciscolive.py ├── cjsw.py ├── cliphunter.py ├── clippit.py ├── cliprs.py ├── clipsyndicate.py ├── closertotruth.py ├── cloudflarestream.py ├── cloudy.py ├── clubic.py ├── clyp.py ├── cmt.py ├── cnbc.py ├── cnn.py ├── comedycentral.py ├── common.py ├── commonmistakes.py ├── commonprotocols.py ├── condenast.py ├── contv.py ├── corus.py ├── coub.py ├── cracked.py ├── crackle.py ├── crooksandliars.py ├── crunchyroll.py ├── cspan.py ├── ctsnews.py ├── ctvnews.py ├── cultureunplugged.py ├── curiositystream.py ├── cwtv.py ├── dailymail.py ├── dailymotion.py ├── daum.py ├── dbtv.py ├── dctp.py ├── deezer.py ├── defense.py ├── democracynow.py ├── dfb.py ├── dhm.py ├── digg.py ├── digiteka.py ├── discovery.py ├── discoverygo.py ├── discoverynetworks.py ├── discoveryvr.py ├── disney.py ├── dispeak.py ├── dlive.py ├── doodstream.py ├── dotsub.py ├── douyutv.py ├── dplay.py ├── drbonanza.py ├── dropbox.py ├── drtuber.py ├── drtv.py ├── dtube.py ├── duboku.py ├── dumpert.py ├── dvtv.py ├── dw.py ├── eagleplatform.py ├── ebaumsworld.py ├── echomsk.py ├── egghead.py ├── ehow.py ├── eighttracks.py ├── einthusan.py ├── eitb.py ├── ellentube.py ├── elonet.py ├── elpais.py ├── embedly.py ├── engadget.py ├── eporner.py ├── eroprofile.py ├── escapist.py ├── espn.py ├── esri.py ├── europa.py ├── everyonesmixtape.py ├── expotv.py ├── expressen.py ├── extractors.py ├── extremetube.py ├── eyedotv.py ├── facebook.py ├── faz.py ├── fc2.py ├── fczenit.py ├── filmon.py ├── filmweb.py ├── firsttv.py ├── fivemin.py ├── fivetv.py ├── flickr.py ├── folketinget.py ├── footyroom.py ├── formula1.py ├── fourtube.py ├── fox.py ├── fox9.py ├── foxgay.py ├── foxnews.py ├── foxsports.py ├── franceculture.py ├── franceinter.py ├── francetv.py ├── freesound.py ├── freespeech.py ├── freshlive.py ├── frontendmasters.py ├── funimation.py ├── funk.py ├── fusion.py ├── fxnetworks.py ├── gaia.py ├── gameinformer.py ├── gamespot.py ├── gamestar.py ├── 
gaskrank.py ├── gazeta.py ├── gdcvault.py ├── gedi.py ├── generic.py ├── gfycat.py ├── giantbomb.py ├── giga.py ├── gigya.py ├── glide.py ├── globo.py ├── go.py ├── godtube.py ├── golem.py ├── googledrive.py ├── googleplus.py ├── googlesearch.py ├── goshgay.py ├── gputechconf.py ├── groupon.py ├── hbo.py ├── hearthisat.py ├── heise.py ├── hellporno.py ├── helsinki.py ├── hentaistigma.py ├── hgtv.py ├── hidive.py ├── historicfilms.py ├── hitbox.py ├── hitrecord.py ├── hketv.py ├── hornbunny.py ├── hotnewhiphop.py ├── hotstar.py ├── howcast.py ├── howstuffworks.py ├── hrfensehen.py ├── hrti.py ├── huajiao.py ├── huffpost.py ├── hungama.py ├── hypem.py ├── ign.py ├── imdb.py ├── imggaming.py ├── imgur.py ├── ina.py ├── inc.py ├── indavideo.py ├── infoq.py ├── instagram.py ├── internazionale.py ├── internetvideoarchive.py ├── iprima.py ├── iqiyi.py ├── ir90tv.py ├── itv.py ├── ivi.py ├── ivideon.py ├── iwara.py ├── izlesene.py ├── jamendo.py ├── jeuxvideo.py ├── joj.py ├── jove.py ├── jwplatform.py ├── kakao.py ├── kaltura.py ├── kanalplay.py ├── kankan.py ├── karaoketv.py ├── karrierevideos.py ├── keezmovies.py ├── ketnet.py ├── khanacademy.py ├── kickstarter.py ├── kinja.py ├── kinopoisk.py ├── konserthusetplay.py ├── krasview.py ├── ku6.py ├── kusi.py ├── kuwo.py ├── la7.py ├── laola1tv.py ├── lbry.py ├── lci.py ├── lcp.py ├── lecture2go.py ├── lecturio.py ├── leeco.py ├── lego.py ├── lemonde.py ├── lenta.py ├── libraryofcongress.py ├── libsyn.py ├── lifenews.py ├── limelight.py ├── line.py ├── linkedin.py ├── linuxacademy.py ├── litv.py ├── livejournal.py ├── liveleak.py ├── livestream.py ├── lnkgo.py ├── localnews8.py ├── lovehomeporn.py ├── lrt.py ├── lynda.py ├── m6.py ├── magentamusik360.py ├── mailru.py ├── malltv.py ├── mangomolo.py ├── manyvids.py ├── markiza.py ├── massengeschmacktv.py ├── matchtv.py ├── mdr.py ├── medaltv.py ├── medialaan.py ├── mediaset.py ├── mediasite.py ├── medici.py ├── megaphone.py ├── meipai.py ├── melonvod.py ├── meta.py ├── metacafe.py ├── metacritic.py ├── mgoon.py ├── mgtv.py ├── miaopai.py ├── microsoftvirtualacademy.py ├── ministrygrid.py ├── minoto.py ├── miomio.py ├── mit.py ├── mitele.py ├── mixcloud.py ├── mlb.py ├── mnet.py ├── moevideo.py ├── mofosex.py ├── mojvideo.py ├── morningstar.py ├── motherless.py ├── motorsport.py ├── movieclips.py ├── moviezine.py ├── movingimage.py ├── msn.py ├── mtv.py ├── muenchentv.py ├── mwave.py ├── mychannels.py ├── myspace.py ├── myspass.py ├── myvi.py ├── myvideoge.py ├── myvidster.py ├── nationalgeographic.py ├── naver.py ├── nba.py ├── nbc.py ├── ndr.py ├── ndtv.py ├── nerdcubed.py ├── neteasemusic.py ├── netzkino.py ├── newgrounds.py ├── newstube.py ├── nextmedia.py ├── nexx.py ├── nfl.py ├── nhk.py ├── nhl.py ├── nick.py ├── niconico.py ├── ninecninemedia.py ├── ninegag.py ├── ninenow.py ├── nintendo.py ├── nitter.py ├── njpwworld.py ├── nobelprize.py ├── noco.py ├── nonktube.py ├── noovo.py ├── normalboots.py ├── nosvideo.py ├── nova.py ├── nowness.py ├── noz.py ├── npo.py ├── npr.py ├── nrk.py ├── nrl.py ├── ntvcojp.py ├── ntvde.py ├── ntvru.py ├── nuevo.py ├── nuvid.py ├── nytimes.py ├── nzz.py ├── odatv.py ├── odnoklassniki.py ├── oktoberfesttv.py ├── once.py ├── ondemandkorea.py ├── onet.py ├── onionstudios.py ├── ooyala.py ├── openload.py ├── ora.py ├── orf.py ├── outsidetv.py ├── packtpub.py ├── pandoratv.py ├── parliamentliveuk.py ├── patreon.py ├── pbs.py ├── pearvideo.py ├── peertube.py ├── people.py ├── performgroup.py ├── periscope.py ├── philharmoniedeparis.py ├── phoenix.py ├── photobucket.py 
├── picarto.py ├── piksel.py ├── pinkbike.py ├── pinterest.py ├── pladform.py ├── platzi.py ├── playfm.py ├── playplustv.py ├── plays.py ├── playtvak.py ├── playvid.py ├── playwire.py ├── pluralsight.py ├── podomatic.py ├── pokemon.py ├── polskieradio.py ├── popcorntimes.py ├── popcorntv.py ├── porn91.py ├── porncom.py ├── pornhd.py ├── pornhub.py ├── pornotube.py ├── pornovoisines.py ├── pornoxo.py ├── presstv.py ├── prosiebensat1.py ├── puhutv.py ├── puls4.py ├── pyvideo.py ├── qqmusic.py ├── r7.py ├── radiobremen.py ├── radiocanada.py ├── radiode.py ├── radiofrance.py ├── radiojavan.py ├── rai.py ├── raywenderlich.py ├── rbmaradio.py ├── rcs.py ├── rds.py ├── redbulltv.py ├── reddit.py ├── redtube.py ├── regiotv.py ├── rentv.py ├── restudy.py ├── reuters.py ├── reverbnation.py ├── rice.py ├── rmcdecouverte.py ├── ro220.py ├── rockstargames.py ├── roosterteeth.py ├── rottentomatoes.py ├── roxwel.py ├── rozhlas.py ├── rtbf.py ├── rte.py ├── rtl2.py ├── rtlnl.py ├── rtp.py ├── rts.py ├── rtve.py ├── rtvnh.py ├── rtvs.py ├── ruhd.py ├── rumble.py ├── rutube.py ├── rutv.py ├── ruutu.py ├── ruv.py ├── safari.py ├── sapo.py ├── savefrom.py ├── sbs.py ├── screencast.py ├── screencastomatic.py ├── scrippsnetworks.py ├── scte.py ├── seeker.py ├── senateisvp.py ├── sendtonews.py ├── servus.py ├── sevenplus.py ├── sexu.py ├── seznamzpravy.py ├── shahid.py ├── shared.py ├── showroomlive.py ├── sina.py ├── sixplay.py ├── sky.py ├── skyit.py ├── skylinewebcams.py ├── skynewsarabia.py ├── slideshare.py ├── slideslive.py ├── slutload.py ├── smotri.py ├── snotr.py ├── sohu.py ├── sonyliv.py ├── soundcloud.py ├── soundgasm.py ├── southpark.py ├── spankbang.py ├── spankwire.py ├── spiegel.py ├── spiegeltv.py ├── spike.py ├── sport5.py ├── sportbox.py ├── sportdeutschland.py ├── spreaker.py ├── springboardplatform.py ├── sprout.py ├── srgssr.py ├── srmediathek.py ├── stanfordoc.py ├── steam.py ├── stitcher.py ├── storyfire.py ├── streamable.py ├── streamcloud.py ├── streamcz.py ├── streetvoice.py ├── stretchinternet.py ├── stv.py ├── sunporno.py ├── sverigesradio.py ├── svt.py ├── swrmediathek.py ├── syfy.py ├── sztvhu.py ├── tagesschau.py ├── tass.py ├── tastytrade.py ├── tbs.py ├── tdslifeway.py ├── teachable.py ├── teachertube.py ├── teachingchannel.py ├── teamcoco.py ├── teamtreehouse.py ├── techtalks.py ├── ted.py ├── tele13.py ├── tele5.py ├── telebruxelles.py ├── telecinco.py ├── telegraaf.py ├── telemb.py ├── telequebec.py ├── teletask.py ├── telewebion.py ├── tennistv.py ├── tenplay.py ├── testurl.py ├── tf1.py ├── tfo.py ├── theintercept.py ├── theplatform.py ├── thescene.py ├── thestar.py ├── thesun.py ├── theweatherchannel.py ├── thisamericanlife.py ├── thisav.py ├── thisoldhouse.py ├── thisvid.py ├── threeqsdn.py ├── tiktok.py ├── tinypic.py ├── tmz.py ├── tnaflix.py ├── toggle.py ├── tonline.py ├── toongoggles.py ├── toutv.py ├── toypics.py ├── traileraddict.py ├── trilulilu.py ├── trunews.py ├── trutv.py ├── tube8.py ├── tubitv.py ├── tudou.py ├── tumblr.py ├── tunein.py ├── tunepk.py ├── turbo.py ├── turner.py ├── tv2.py ├── tv2dk.py ├── tv2hu.py ├── tv4.py ├── tv5mondeplus.py ├── tva.py ├── tvanouvelles.py ├── tvc.py ├── tvigle.py ├── tvland.py ├── tvn24.py ├── tvnet.py ├── tvnoe.py ├── tvnow.py ├── tvp.py ├── tvplay.py ├── tvplayer.py ├── tweakers.py ├── twentyfourvideo.py ├── twentymin.py ├── twentythreevideo.py ├── twitcasting.py ├── twitch.py ├── twitter.py ├── udemy.py ├── udn.py ├── ufctv.py ├── uktvplay.py ├── umg.py ├── unistra.py ├── unity.py ├── uol.py ├── uplynk.py ├── urort.py 
├── urplay.py ├── usanetwork.py ├── usatoday.py ├── ustream.py ├── ustudio.py ├── varzesh3.py ├── vbox7.py ├── veehd.py ├── veoh.py ├── vesti.py ├── vevo.py ├── vgtv.py ├── vh1.py ├── vice.py ├── vidbit.py ├── viddler.py ├── videa.py ├── videodetective.py ├── videofyme.py ├── videomore.py ├── videopress.py ├── vidio.py ├── vidlii.py ├── vidme.py ├── vidzi.py ├── vier.py ├── viewlift.py ├── viidea.py ├── viki.py ├── vimeo.py ├── vimple.py ├── vine.py ├── viqeo.py ├── viu.py ├── vk.py ├── vlive.py ├── vodlocker.py ├── vodpl.py ├── vodplatform.py ├── voicerepublic.py ├── voot.py ├── voxmedia.py ├── vrak.py ├── vrt.py ├── vrv.py ├── vshare.py ├── vube.py ├── vuclip.py ├── vvvvid.py ├── vyborymos.py ├── vzaar.py ├── wakanim.py ├── walla.py ├── washingtonpost.py ├── wat.py ├── watchbox.py ├── watchindianporn.py ├── wdr.py ├── webcaster.py ├── webofstories.py ├── weibo.py ├── weiqitv.py ├── wistia.py ├── worldstarhiphop.py ├── wsj.py ├── wwe.py ├── xbef.py ├── xboxclips.py ├── xfileshare.py ├── xhamster.py ├── xiami.py ├── ximalaya.py ├── xminus.py ├── xnxx.py ├── xstream.py ├── xtube.py ├── xuite.py ├── xvideos.py ├── xxxymovies.py ├── yahoo.py ├── yandexdisk.py ├── yandexmusic.py ├── yandexvideo.py ├── yapfiles.py ├── yesjapan.py ├── yinyuetai.py ├── ynet.py ├── youjizz.py ├── youku.py ├── younow.py ├── youporn.py ├── yourporn.py ├── yourupload.py ├── youtube.py ├── zapiks.py ├── zaq1.py ├── zattoo.py ├── zdf.py ├── zingmp3.py ├── zoom.py └── zype.py ├── jsinterp.py ├── options.py ├── postprocessor ├── __init__.py ├── common.py ├── embedthumbnail.py ├── execafterdownload.py ├── ffmpeg.py ├── metadatafromtitle.py └── xattrpp.py ├── socks.py ├── swfinterp.py ├── update.py ├── utils.py └── version.py /.github/ISSUE_TEMPLATE/3_site_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Site feature request 3 | about: Request a new functionality for a site 4 | title: "[Site Request]" 5 | labels: Request 6 | assignees: '' 7 | 8 | --- 9 | 10 | 18 | 19 | 20 | ## Checklist 21 | 22 | 28 | 29 | - [ ] I'm reporting a site feature request 30 | - [ ] I've verified that I'm running youtube-dlc version **2020.10.31** 31 | - [ ] I've searched the bugtracker for similar site feature requests including closed ones 32 | 33 | 34 | ## Description 35 | 36 | 39 | 40 | WRITE DESCRIPTION HERE 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/5_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Request a new functionality unrelated to any particular site or extractor 4 | title: "[Feature Request]" 5 | labels: Request 6 | assignees: '' 7 | 8 | --- 9 | 10 | 18 | 19 | 20 | ## Checklist 21 | 22 | 28 | 29 | - [ ] I'm reporting a feature request 30 | - [ ] I've verified that I'm running youtube-dlc version **2020.10.31** 31 | - [ ] I've searched the bugtracker for similar feature requests including closed ones 32 | 33 | 34 | ## Description 35 | 36 | 39 | 40 | WRITE DESCRIPTION HERE 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/6_question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ask question 3 | about: Ask youtube-dl related question 4 | title: "[Question]" 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | 18 | 19 | 20 | ## Checklist 21 | 22 | 28 | 29 | - [ ] I'm asking a 
question 30 | - [ ] I've looked through the README and FAQ for similar questions 31 | - [ ] I've searched the bugtracker for similar questions including closed ones 32 | 33 | 34 | ## Question 35 | 36 | 39 | 40 | WRITE QUESTION HERE 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Site feature request 3 | about: Request a new functionality for a site 4 | title: '' 5 | --- 6 | 7 | 15 | 16 | 17 | ## Checklist 18 | 19 | 25 | 26 | - [ ] I'm reporting a site feature request 27 | - [ ] I've verified that I'm running youtube-dlc version **%(version)s** 28 | - [ ] I've searched the bugtracker for similar site feature requests including closed ones 29 | 30 | 31 | ## Description 32 | 33 | 36 | 37 | WRITE DESCRIPTION HERE 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE_tmpl/5_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Request a new functionality unrelated to any particular site or extractor 4 | title: '' 5 | labels: 'request' 6 | --- 7 | 8 | 16 | 17 | 18 | ## Checklist 19 | 20 | 26 | 27 | - [ ] I'm reporting a feature request 28 | - [ ] I've verified that I'm running youtube-dlc version **%(version)s** 29 | - [ ] I've searched the bugtracker for similar feature requests including closed ones 30 | 31 | 32 | ## Description 33 | 34 | 37 | 38 | WRITE DESCRIPTION HERE 39 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Please follow the guide below 2 | 3 | - You will be asked some questions, please read them **carefully** and answer honestly 4 | - Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x]) 5 | - Use *Preview* tab to see how your *pull request* will actually look like 6 | 7 | --- 8 | 9 | ### Before submitting a *pull request* make sure you have: 10 | - [ ] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections 11 | - [ ] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests 12 | - [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) 13 | 14 | ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: 15 | - [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) 16 | - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) 17 | 18 | ### What is the purpose of your *pull request*? 19 | - [ ] Bug fix 20 | - [ ] Improvement 21 | - [ ] New extractor 22 | - [ ] New feature 23 | 24 | --- 25 | 26 | ### Description of your *pull request* and other information 27 | 28 | Explanation of your *pull request* in arbitrary form goes here. 
Please make sure the description explains the purpose and effect of your *pull request* and is worded well enough to be understood. Provide as much context and examples as possible. 29 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml.disable: -------------------------------------------------------------------------------- 1 | # This workflows will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | push: 8 | branches: 9 | - release 10 | 11 | jobs: 12 | deploy: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Set up Python 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: '3.x' 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install setuptools wheel twine 26 | - name: Build and publish 27 | env: 28 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 29 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 30 | run: | 31 | rm -rf dist/* 32 | python setup.py sdist bdist_wheel 33 | twine upload dist/* 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.pyo 3 | *.class 4 | *~ 5 | *.DS_Store 6 | wine-py2exe/ 7 | py2exe.log 8 | *.kate-swp 9 | build/ 10 | dist/ 11 | MANIFEST 12 | README.txt 13 | youtube-dl.1 14 | youtube-dlc.1 15 | youtube-dl.bash-completion 16 | youtube-dlc.bash-completion 17 | youtube-dl.fish 18 | youtube-dlc.fish 19 | youtube_dl/extractor/lazy_extractors.py 20 | youtube_dlc/extractor/lazy_extractors.py 21 | youtube-dl 22 | youtube-dlc 23 | youtube-dl.exe 24 | youtube-dlc.exe 25 | youtube-dl.tar.gz 26 | youtube-dlc.tar.gz 27 | youtube-dlc.spec 28 | .coverage 29 | cover/ 30 | updates_key.pem 31 | *.egg-info 32 | *.srt 33 | *.ttml 34 | *.sbv 35 | *.vtt 36 | *.flv 37 | *.mp4 38 | *.m4a 39 | *.m4v 40 | *.mp3 41 | *.3gp 42 | *.wav 43 | *.ape 44 | *.mkv 45 | *.swf 46 | *.part 47 | *.ytdl 48 | *.swp 49 | test/local_parameters.json 50 | .tox 51 | youtube-dl.zsh 52 | youtube-dlc.zsh 53 | 54 | # IntelliJ related files 55 | .idea 56 | *.iml 57 | 58 | tmp/ 59 | venv/ 60 | 61 | # VS Code related files 62 | .vscode 63 | 64 | cookies.txt 65 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.6" 4 | - "2.7" 5 | - "3.2" 6 | - "3.3" 7 | - "3.4" 8 | - "3.5" 9 | - "3.6" 10 | - "pypy" 11 | - "pypy3" 12 | dist: trusty 13 | env: 14 | - YTDL_TEST_SET=core 15 | jobs: 16 | include: 17 | - python: 3.7 18 | dist: xenial 19 | env: YTDL_TEST_SET=core 20 | - python: 3.8 21 | dist: xenial 22 | env: YTDL_TEST_SET=core 23 | - python: 3.8-dev 24 | dist: xenial 25 | env: YTDL_TEST_SET=core 26 | - env: JYTHON=true; YTDL_TEST_SET=core 27 | - name: flake8 28 | python: 3.8 29 | dist: xenial 30 | install: pip install flake8 31 | script: flake8 . 
32 | fast_finish: true 33 | allow_failures: 34 | - env: YTDL_TEST_SET=download 35 | - env: JYTHON=true; YTDL_TEST_SET=core 36 | before_install: 37 | - if [ "$JYTHON" == "true" ]; then ./devscripts/install_jython.sh; export PATH="$HOME/jython/bin:$PATH"; fi 38 | script: ./devscripts/run_tests.sh 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | include AUTHORS 4 | include ChangeLog 5 | include youtube-dlc.bash-completion 6 | include youtube-dlc.fish 7 | include youtube-dlc.1 8 | recursive-include docs Makefile conf.py *.rst 9 | recursive-include test * 10 | -------------------------------------------------------------------------------- /devscripts/SizeOfImage.patch: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blackjack4494/yt-dlc/f9401f2a91987068139c5f757b12fc711d4c0cee/devscripts/SizeOfImage.patch -------------------------------------------------------------------------------- /devscripts/SizeOfImage_w.patch: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blackjack4494/yt-dlc/f9401f2a91987068139c5f757b12fc711d4c0cee/devscripts/SizeOfImage_w.patch -------------------------------------------------------------------------------- /devscripts/bash-completion.in: -------------------------------------------------------------------------------- 1 | __youtube_dlc() 2 | { 3 | local cur prev opts fileopts diropts keywords 4 | COMPREPLY=() 5 | cur="${COMP_WORDS[COMP_CWORD]}" 6 | prev="${COMP_WORDS[COMP_CWORD-1]}" 7 | opts="{{flags}}" 8 | keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory" 9 | fileopts="-a|--batch-file|--download-archive|--cookies|--load-info" 10 | diropts="--cache-dir" 11 | 12 | if [[ ${prev} =~ ${fileopts} ]]; then 13 | COMPREPLY=( $(compgen -f -- ${cur}) ) 14 | return 0 15 | elif [[ ${prev} =~ 
${diropts} ]]; then 16 | COMPREPLY=( $(compgen -d -- ${cur}) ) 17 | return 0 18 | fi 19 | 20 | if [[ ${cur} =~ : ]]; then 21 | COMPREPLY=( $(compgen -W "${keywords}" -- ${cur}) ) 22 | return 0 23 | elif [[ ${cur} == * ]] ; then 24 | COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) 25 | return 0 26 | fi 27 | } 28 | 29 | complete -F __youtube_dlc youtube-dlc 30 | -------------------------------------------------------------------------------- /devscripts/bash-completion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | import os 5 | from os.path import dirname as dirn 6 | import sys 7 | 8 | sys.path.insert(0, dirn(dirn((os.path.abspath(__file__))))) 9 | import youtube_dlc 10 | 11 | BASH_COMPLETION_FILE = "youtube-dlc.bash-completion" 12 | BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in" 13 | 14 | 15 | def build_completion(opt_parser): 16 | opts_flag = [] 17 | for group in opt_parser.option_groups: 18 | for option in group.option_list: 19 | # for every long flag 20 | opts_flag.append(option.get_opt_string()) 21 | with open(BASH_COMPLETION_TEMPLATE) as f: 22 | template = f.read() 23 | with open(BASH_COMPLETION_FILE, "w") as f: 24 | # just using the special char 25 | filled_template = template.replace("{{flags}}", " ".join(opts_flag)) 26 | f.write(filled_template) 27 | 28 | 29 | parser = youtube_dlc.parseOpts()[0] 30 | build_completion(parser) 31 | -------------------------------------------------------------------------------- /devscripts/fish-completion.in: -------------------------------------------------------------------------------- 1 | 2 | {{commands}} 3 | 4 | 5 | complete --command youtube-dlc --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory" 6 | -------------------------------------------------------------------------------- /devscripts/generate_aes_testdata.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import codecs 4 | import subprocess 5 | 6 | import os 7 | import sys 8 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 9 | 10 | from youtube_dlc.utils import intlist_to_bytes 11 | from youtube_dlc.aes import aes_encrypt, key_expansion 12 | 13 | secret_msg = b'Secret message goes here' 14 | 15 | 16 | def hex_str(int_list): 17 | return codecs.encode(intlist_to_bytes(int_list), 'hex') 18 | 19 | 20 | def openssl_encode(algo, key, iv): 21 | cmd = ['openssl', 'enc', '-e', '-' + algo, '-K', hex_str(key), '-iv', hex_str(iv)] 22 | prog = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) 23 | out, _ = prog.communicate(secret_msg) 24 | return out 25 | 26 | 27 | iv = key = [0x20, 0x15] + 14 * [0] 28 | 29 | r = openssl_encode('aes-128-cbc', key, iv) 30 | print('aes_cbc_decrypt') 31 | print(repr(r)) 32 | 33 | password = key 34 | new_key = aes_encrypt(password, key_expansion(password)) 35 | r = openssl_encode('aes-128-ctr', new_key, iv) 36 | print('aes_decrypt_text 16') 37 | print(repr(r)) 38 | 39 | password = key + 16 * [0] 40 | new_key = aes_encrypt(password, key_expansion(password)) * (32 // 16) 41 | r = openssl_encode('aes-256-ctr', new_key, iv) 42 | print('aes_decrypt_text 32') 43 | print(repr(r)) 44 | -------------------------------------------------------------------------------- /devscripts/gh-pages/add-version.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python3 2 | from __future__ import unicode_literals 3 | 4 | import json 5 | import sys 6 | import hashlib 7 | import os.path 8 | 9 | 10 | if len(sys.argv) <= 1: 11 | print('Specify the version number as parameter') 12 | sys.exit() 13 | version = sys.argv[1] 14 | 15 | with open('update/LATEST_VERSION', 'w') as f: 16 | f.write(version) 17 | 18 | versions_info = json.load(open('update/versions.json')) 19 | if 'signature' in versions_info: 20 | del versions_info['signature'] 21 | 22 | new_version = {} 23 | 24 | filenames = { 25 | 'bin': 'youtube-dlc', 26 | 'exe': 'youtube-dlc.exe', 27 | 'tar': 'youtube-dlc-%s.tar.gz' % version} 28 | build_dir = os.path.join('..', '..', 'build', version) 29 | for key, filename in filenames.items(): 30 | url = 'https://yt-dl.org/downloads/%s/%s' % (version, filename) 31 | fn = os.path.join(build_dir, filename) 32 | with open(fn, 'rb') as f: 33 | data = f.read() 34 | if not data: 35 | raise ValueError('File %s is empty!' % fn) 36 | sha256sum = hashlib.sha256(data).hexdigest() 37 | new_version[key] = (url, sha256sum) 38 | 39 | versions_info['versions'][version] = new_version 40 | versions_info['latest'] = version 41 | 42 | with open('update/versions.json', 'w') as jsonf: 43 | json.dump(versions_info, jsonf, indent=4, sort_keys=True) 44 | -------------------------------------------------------------------------------- /devscripts/gh-pages/generate-download.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from __future__ import unicode_literals 3 | 4 | import json 5 | 6 | versions_info = json.load(open('update/versions.json')) 7 | version = versions_info['latest'] 8 | version_dict = versions_info['versions'][version] 9 | 10 | # Read template page 11 | with open('download.html.in', 'r', encoding='utf-8') as tmplf: 12 | template = tmplf.read() 13 | 14 | template = template.replace('@PROGRAM_VERSION@', version) 15 | template = template.replace('@PROGRAM_URL@', version_dict['bin'][0]) 16 | template = template.replace('@PROGRAM_SHA256SUM@', version_dict['bin'][1]) 17 | template = template.replace('@EXE_URL@', version_dict['exe'][0]) 18 | template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1]) 19 | template = template.replace('@TAR_URL@', version_dict['tar'][0]) 20 | template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1]) 21 | with open('download.html', 'w', encoding='utf-8') as dlf: 22 | dlf.write(template) 23 | -------------------------------------------------------------------------------- /devscripts/gh-pages/sign-versions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from __future__ import unicode_literals, with_statement 3 | 4 | import rsa 5 | import json 6 | from binascii import hexlify 7 | 8 | try: 9 | input = raw_input 10 | except NameError: 11 | pass 12 | 13 | versions_info = json.load(open('update/versions.json')) 14 | if 'signature' in versions_info: 15 | del versions_info['signature'] 16 | 17 | print('Enter the PKCS1 private key, followed by a blank line:') 18 | privkey = b'' 19 | while True: 20 | try: 21 | line = input() 22 | except EOFError: 23 | break 24 | if line == '': 25 | break 26 | privkey += line.encode('ascii') + b'\n' 27 | privkey = rsa.PrivateKey.load_pkcs1(privkey) 28 | 29 | signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode() 30 | print('signature: ' + signature) 31 | 32 | 
versions_info['signature'] = signature 33 | with open('update/versions.json', 'w') as versionsf: 34 | json.dump(versions_info, versionsf, indent=4, sort_keys=True) 35 | -------------------------------------------------------------------------------- /devscripts/gh-pages/update-copyright.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | from __future__ import with_statement, unicode_literals 5 | 6 | import datetime 7 | import glob 8 | import io # For Python 2 compatibility 9 | import os 10 | import re 11 | 12 | year = str(datetime.datetime.now().year) 13 | for fn in glob.glob('*.html*'): 14 | with io.open(fn, encoding='utf-8') as f: 15 | content = f.read() 16 | newc = re.sub(r'(?PCopyright © 2011-)(?P[0-9]{4})', 'Copyright © 2011-' + year, content) 17 | if content != newc: 18 | tmpFn = fn + '.part' 19 | with io.open(tmpFn, 'wt', encoding='utf-8') as outf: 20 | outf.write(newc) 21 | os.rename(tmpFn, fn) 22 | -------------------------------------------------------------------------------- /devscripts/gh-pages/update-sites.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from __future__ import unicode_literals 3 | 4 | import sys 5 | import os 6 | import textwrap 7 | 8 | # We must be able to import youtube_dlc 9 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 10 | 11 | import youtube_dlc 12 | 13 | 14 | def main(): 15 | with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf: 16 | template = tmplf.read() 17 | 18 | ie_htmls = [] 19 | for ie in youtube_dlc.list_extractors(age_limit=None): 20 | ie_html = '{}'.format(ie.IE_NAME) 21 | ie_desc = getattr(ie, 'IE_DESC', None) 22 | if ie_desc is False: 23 | continue 24 | elif ie_desc is not None: 25 | ie_html += ': {}'.format(ie.IE_DESC) 26 | if not ie.working(): 27 | ie_html += ' (Currently broken)' 28 | ie_htmls.append('
  • {}
  • '.format(ie_html)) 29 | 30 | template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t')) 31 | 32 | with open('supportedsites.html', 'w', encoding='utf-8') as sitesf: 33 | sitesf.write(template) 34 | 35 | 36 | if __name__ == '__main__': 37 | main() 38 | -------------------------------------------------------------------------------- /devscripts/install_jython.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | wget http://central.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar 4 | java -jar jython-installer-2.7.1.jar -s -d "$HOME/jython" 5 | $HOME/jython/bin/jython -m pip install nose 6 | -------------------------------------------------------------------------------- /devscripts/lazy_load_template.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | 7 | class LazyLoadExtractor(object): 8 | _module = None 9 | 10 | @classmethod 11 | def ie_key(cls): 12 | return cls.__name__[:-2] 13 | 14 | def __new__(cls, *args, **kwargs): 15 | mod = __import__(cls._module, fromlist=(cls.__name__,)) 16 | real_cls = getattr(mod, cls.__name__) 17 | instance = real_cls.__new__(real_cls) 18 | instance.__init__(*args, **kwargs) 19 | return instance 20 | -------------------------------------------------------------------------------- /devscripts/make_contributing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | # import io 5 | import optparse 6 | # import re 7 | 8 | 9 | def main(): 10 | parser = optparse.OptionParser(usage='%prog INFILE OUTFILE') 11 | options, args = parser.parse_args() 12 | if len(args) != 2: 13 | parser.error('Expected an input and an output filename') 14 | 15 | 16 | """ infile, outfile = args 17 | 18 | with io.open(infile, encoding='utf-8') as inf: 19 | readme = inf.read() 20 | 21 | bug_text = re.search( """ 22 | # r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1) 23 | # dev_text = re.search( 24 | # r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING youtube-dlc', 25 | """ readme).group(1) 26 | 27 | out = bug_text + dev_text 28 | 29 | with io.open(outfile, 'w', encoding='utf-8') as outf: 30 | outf.write(out) """ 31 | 32 | if __name__ == '__main__': 33 | main() 34 | -------------------------------------------------------------------------------- /devscripts/make_issue_template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | import io 5 | import optparse 6 | 7 | 8 | def main(): 9 | parser = optparse.OptionParser(usage='%prog INFILE OUTFILE') 10 | options, args = parser.parse_args() 11 | if len(args) != 2: 12 | parser.error('Expected an input and an output filename') 13 | 14 | infile, outfile = args 15 | 16 | with io.open(infile, encoding='utf-8') as inf: 17 | issue_template_tmpl = inf.read() 18 | 19 | # Get the version from youtube_dlc/version.py without importing the package 20 | exec(compile(open('youtube_dlc/version.py').read(), 21 | 'youtube_dlc/version.py', 'exec')) 22 | 23 | out = issue_template_tmpl % {'version': locals()['__version__']} 24 | 25 | with io.open(outfile, 'w', encoding='utf-8') as outf: 26 | outf.write(out) 27 | 28 | if __name__ == '__main__': 29 | main() 30 | 
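For reference, make_issue_template.py above is what turns the .github/ISSUE_TEMPLATE_tmpl files into the generated .github/ISSUE_TEMPLATE ones: it reads an input template, substitutes %(version)s with the version read from youtube_dlc/version.py, and writes the result to the output path. A hypothetical invocation for one template (the paths are assumed from the tree at the top of this dump; the actual Makefile target driving it is not shown here) might look like:

    # paths are assumed, not taken from this dump
    python devscripts/make_issue_template.py \
        .github/ISSUE_TEMPLATE_tmpl/1_broken_site.md \
        .github/ISSUE_TEMPLATE/1_broken_site.md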
-------------------------------------------------------------------------------- /devscripts/make_readme.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import io 4 | import sys 5 | import re 6 | 7 | README_FILE = 'README.md' 8 | helptext = sys.stdin.read() 9 | 10 | if isinstance(helptext, bytes): 11 | helptext = helptext.decode('utf-8') 12 | 13 | with io.open(README_FILE, encoding='utf-8') as f: 14 | oldreadme = f.read() 15 | 16 | header = oldreadme[:oldreadme.index('# OPTIONS')] 17 | # footer = oldreadme[oldreadme.index('# CONFIGURATION'):] 18 | 19 | options = helptext[helptext.index(' General Options:') + 19:] 20 | options = re.sub(r'(?m)^ (\w.+)$', r'## \1', options) 21 | options = '# OPTIONS\n' + options + '\n' 22 | 23 | with io.open(README_FILE, 'w', encoding='utf-8') as f: 24 | f.write(header) 25 | f.write(options) 26 | # f.write(footer) 27 | -------------------------------------------------------------------------------- /devscripts/make_supportedsites.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | import io 5 | import optparse 6 | import os 7 | import sys 8 | 9 | 10 | # Import youtube_dlc 11 | ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') 12 | sys.path.insert(0, ROOT_DIR) 13 | import youtube_dlc 14 | 15 | 16 | def main(): 17 | parser = optparse.OptionParser(usage='%prog OUTFILE.md') 18 | options, args = parser.parse_args() 19 | if len(args) != 1: 20 | parser.error('Expected an output filename') 21 | 22 | outfile, = args 23 | 24 | def gen_ies_md(ies): 25 | for ie in ies: 26 | ie_md = '**{0}**'.format(ie.IE_NAME) 27 | ie_desc = getattr(ie, 'IE_DESC', None) 28 | if ie_desc is False: 29 | continue 30 | if ie_desc is not None: 31 | ie_md += ': {0}'.format(ie.IE_DESC) 32 | if not ie.working(): 33 | ie_md += ' (Currently broken)' 34 | yield ie_md 35 | 36 | ies = sorted(youtube_dlc.gen_extractors(), key=lambda i: i.IE_NAME.lower()) 37 | out = '# Supported sites\n' + ''.join( 38 | ' - ' + md + '\n' 39 | for md in gen_ies_md(ies)) 40 | 41 | with io.open(outfile, 'w', encoding='utf-8') as outf: 42 | outf.write(out) 43 | 44 | 45 | if __name__ == '__main__': 46 | main() 47 | -------------------------------------------------------------------------------- /devscripts/posix-locale.sh: -------------------------------------------------------------------------------- 1 | 2 | # source this file in your shell to get a POSIX locale (which will break many programs, but that's kind of the point) 3 | 4 | export LC_ALL=POSIX 5 | export LANG=POSIX 6 | export LANGUAGE=POSIX 7 | -------------------------------------------------------------------------------- /devscripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Keep this list in sync with the `offlinetest` target in Makefile 4 | DOWNLOAD_TESTS="age_restriction|download|iqiyi_sdk_interpreter|socks|subtitles|write_annotations|youtube_lists|youtube_signature" 5 | 6 | test_set="" 7 | multiprocess_args="" 8 | 9 | case "$YTDL_TEST_SET" in 10 | core) 11 | test_set="-I test_($DOWNLOAD_TESTS)\.py" 12 | ;; 13 | download) 14 | test_set="-I test_(?!$DOWNLOAD_TESTS).+\.py" 15 | multiprocess_args="--processes=4 --process-timeout=540" 16 | ;; 17 | *) 18 | break 19 | ;; 20 | esac 21 | 22 | nosetests test --verbose $test_set $multiprocess_args 23 | 
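For reference, run_tests.sh above selects the test suite from the YTDL_TEST_SET environment variable, matching the .travis.yml configuration shown earlier: "core" excludes the download-heavy tests, while "download" runs only those and adds the multiprocess nose options. A minimal invocation sketch, assuming it is run from the repository root:

    # assumed to be run from the repository root
    # offline/"core" tests only (download-heavy tests excluded)
    YTDL_TEST_SET=core ./devscripts/run_tests.sh

    # download tests, with the multiprocess options set by the script
    YTDL_TEST_SET=download ./devscripts/run_tests.sh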
-------------------------------------------------------------------------------- /devscripts/show-downloads-statistics.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | import itertools 5 | import json 6 | import os 7 | import re 8 | import sys 9 | 10 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 11 | 12 | from youtube_dlc.compat import ( 13 | compat_print, 14 | compat_urllib_request, 15 | ) 16 | from youtube_dlc.utils import format_bytes 17 | 18 | 19 | def format_size(bytes): 20 | return '%s (%d bytes)' % (format_bytes(bytes), bytes) 21 | 22 | 23 | total_bytes = 0 24 | 25 | for page in itertools.count(1): 26 | releases = json.loads(compat_urllib_request.urlopen( 27 | 'https://api.github.com/repos/ytdl-org/youtube-dl/releases?page=%s' % page 28 | ).read().decode('utf-8')) 29 | 30 | if not releases: 31 | break 32 | 33 | for release in releases: 34 | compat_print(release['name']) 35 | for asset in release['assets']: 36 | asset_name = asset['name'] 37 | total_bytes += asset['download_count'] * asset['size'] 38 | if all(not re.match(p, asset_name) for p in ( 39 | r'^youtube-dlc$', 40 | r'^youtube-dlc-\d{4}\.\d{2}\.\d{2}(?:\.\d+)?\.tar\.gz$', 41 | r'^youtube-dlc\.exe$')): 42 | continue 43 | compat_print( 44 | ' %s size: %s downloads: %d' 45 | % (asset_name, format_size(asset['size']), asset['download_count'])) 46 | 47 | compat_print('total downloads traffic: %s' % format_size(total_bytes)) 48 | -------------------------------------------------------------------------------- /devscripts/zsh-completion.in: -------------------------------------------------------------------------------- 1 | #compdef youtube-dlc 2 | 3 | __youtube_dlc() { 4 | local curcontext="$curcontext" fileopts diropts cur prev 5 | typeset -A opt_args 6 | fileopts="{{fileopts}}" 7 | diropts="{{diropts}}" 8 | cur=$words[CURRENT] 9 | case $cur in 10 | :) 11 | _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)' 12 | ;; 13 | *) 14 | prev=$words[CURRENT-1] 15 | if [[ ${prev} =~ ${fileopts} ]]; then 16 | _path_files 17 | elif [[ ${prev} =~ ${diropts} ]]; then 18 | _path_files -/ 19 | elif [[ ${prev} == "--remux-video" ]]; then 20 | _arguments '*: :(mp4 mkv)' 21 | elif [[ ${prev} == "--recode-video" ]]; then 22 | _arguments '*: :(mp4 flv ogg webm mkv)' 23 | else 24 | _arguments '*: :({{flags}})' 25 | fi 26 | ;; 27 | esac 28 | } 29 | 30 | __youtube_dlc -------------------------------------------------------------------------------- /devscripts/zsh-completion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | import os 5 | from os.path import dirname as dirn 6 | import sys 7 | 8 | sys.path.insert(0, dirn(dirn((os.path.abspath(__file__))))) 9 | import youtube_dlc 10 | 11 | ZSH_COMPLETION_FILE = "youtube-dlc.zsh" 12 | ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in" 13 | 14 | 15 | def build_completion(opt_parser): 16 | opts = [opt for group in opt_parser.option_groups 17 | for opt in group.option_list] 18 | opts_file = [opt for opt in opts if opt.metavar == "FILE"] 19 | opts_dir = [opt for opt in opts if opt.metavar == "DIR"] 20 | 21 | fileopts = [] 22 | for opt in opts_file: 23 | if opt._short_opts: 24 | fileopts.extend(opt._short_opts) 25 | if opt._long_opts: 26 | fileopts.extend(opt._long_opts) 27 | 28 | diropts = [] 29 | for opt 
in opts_dir: 30 | if opt._short_opts: 31 | diropts.extend(opt._short_opts) 32 | if opt._long_opts: 33 | diropts.extend(opt._long_opts) 34 | 35 | flags = [opt.get_opt_string() for opt in opts] 36 | 37 | with open(ZSH_COMPLETION_TEMPLATE) as f: 38 | template = f.read() 39 | 40 | template = template.replace("{{fileopts}}", "|".join(fileopts)) 41 | template = template.replace("{{diropts}}", "|".join(diropts)) 42 | template = template.replace("{{flags}}", " ".join(flags)) 43 | 44 | with open(ZSH_COMPLETION_FILE, "w") as f: 45 | f.write(template) 46 | 47 | 48 | parser = youtube_dlc.parseOpts()[0] 49 | build_completion(parser) 50 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | -------------------------------------------------------------------------------- /docs/faq.md: -------------------------------------------------------------------------------- 1 | - Q: How to redirect to another extractor? 2 | - A: 3 | - Most simple using only `url_result` 4 | ``` 5 | # get proper url first if needed. 6 | return self.url_result(url) 7 | ``` 8 | - Using `_request_webpage` and `to_screen` in addition 9 | ``` 10 | urlh = self._request_webpage( 11 | url, id, note='Downloading redirect page') 12 | url = urlh.geturl() 13 | self.to_screen('Following redirect: %s' % url) 14 | return self.url_result(url) 15 | ``` 16 | - Using `return` construction 17 | ``` 18 | return { 19 | '_type': 'url_transparent', 20 | 'url': url, 21 | 'ie_key': ExampleIE.ie_key(), 22 | 'id': id, 23 | } 24 | # Alternative if extractor supports internal uri like kaltura 25 | return { 26 | '_type': 'url_transparent', 27 | 'url': 'kaltura:%s:%s' % (partner_id, kaltura_id), 28 | 'ie_key': KalturaIE.ie_key(), 29 | 'id': id, 30 | } 31 | ``` 32 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to youtube-dlc's documentation! 2 | ====================================== 3 | 4 | *youtube-dlc* is a command-line program to download videos from YouTube.com and more sites. 5 | It can also be used in Python code. 6 | 7 | Developer guide 8 | --------------- 9 | 10 | This section contains information for using *youtube-dlc* from Python programs. 11 | 12 | .. 
toctree:: 13 | :maxdepth: 2 14 | 15 | module_guide 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | 24 | -------------------------------------------------------------------------------- /make_win.bat: -------------------------------------------------------------------------------- 1 | py -m PyInstaller youtube_dlc\__main__.py --onefile --name youtube-dlc --version-file win\ver.txt --icon win\icon\cloud.ico --upx-exclude=vcruntime140.dll -------------------------------------------------------------------------------- /scripts/update-version-workflow.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from datetime import datetime 3 | # import urllib.request 4 | 5 | # response = urllib.request.urlopen('https://blackjack4494.github.io/youtube-dlc/update/LATEST_VERSION') 6 | # _LATEST_VERSION = response.read().decode('utf-8') 7 | 8 | exec(compile(open('youtube_dlc/version.py').read(), 'youtube_dlc/version.py', 'exec')) 9 | 10 | _LATEST_VERSION = locals()['__version__'] 11 | 12 | _OLD_VERSION = _LATEST_VERSION.rsplit("-", 1) 13 | 14 | if len(_OLD_VERSION) > 0: 15 | old_ver = _OLD_VERSION[0] 16 | 17 | old_rev = '' 18 | if len(_OLD_VERSION) > 1: 19 | old_rev = _OLD_VERSION[1] 20 | 21 | now = datetime.now() 22 | # ver = f'{datetime.today():%Y.%m.%d}' 23 | ver = now.strftime("%Y.%m.%d") 24 | rev = '' 25 | 26 | if old_ver == ver: 27 | if old_rev: 28 | rev = int(old_rev) + 1 29 | else: 30 | rev = 1 31 | 32 | _SEPARATOR = '-' 33 | 34 | version = _SEPARATOR.join(filter(None, [ver, str(rev)])) 35 | 36 | print('::set-output name=ytdlc_version::' + version) 37 | 38 | file_version_py = open('youtube_dlc/version.py', 'rt') 39 | data = file_version_py.read() 40 | data = data.replace(locals()['__version__'], version) 41 | file_version_py.close() 42 | file_version_py = open('youtube_dlc/version.py', 'wt') 43 | file_version_py.write(data) 44 | file_version_py.close() 45 | -------------------------------------------------------------------------------- /scripts/update-version.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from datetime import datetime 3 | import urllib.request 4 | 5 | response = urllib.request.urlopen('https://blackjack4494.github.io/youtube-dlc/update/LATEST_VERSION') 6 | 7 | _LATEST_VERSION = response.read().decode('utf-8') 8 | 9 | _OLD_VERSION = _LATEST_VERSION.rsplit("-", 1) 10 | 11 | if len(_OLD_VERSION) > 0: 12 | old_ver = _OLD_VERSION[0] 13 | 14 | old_rev = '' 15 | if len(_OLD_VERSION) > 1: 16 | old_rev = _OLD_VERSION[1] 17 | 18 | now = datetime.now() 19 | # ver = f'{datetime.today():%Y.%m.%d}' 20 | ver = now.strftime("%Y.%m.%d") 21 | rev = '' 22 | 23 | if old_ver == ver: 24 | if old_rev: 25 | rev = int(old_rev) + 1 26 | else: 27 | rev = 1 28 | 29 | _SEPARATOR = '-' 30 | 31 | version = _SEPARATOR.join(filter(None, [ver, str(rev)])) 32 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [wheel] 2 | universal = True 3 | 4 | [flake8] 5 | exclude = youtube_dlc/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git,venv 6 | ignore = E402,E501,E731,E741,W503 7 | -------------------------------------------------------------------------------- 
/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blackjack4494/yt-dlc/f9401f2a91987068139c5f757b12fc711d4c0cee/test/__init__.py -------------------------------------------------------------------------------- /test/parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "consoletitle": false, 3 | "continuedl": true, 4 | "forcedescription": false, 5 | "forcefilename": false, 6 | "forceformat": false, 7 | "forcethumbnail": false, 8 | "forcetitle": false, 9 | "forceurl": false, 10 | "format": "best", 11 | "ignoreerrors": false, 12 | "listformats": null, 13 | "logtostderr": false, 14 | "matchtitle": null, 15 | "max_downloads": null, 16 | "nooverwrites": false, 17 | "nopart": false, 18 | "noprogress": false, 19 | "outtmpl": "%(id)s.%(ext)s", 20 | "password": null, 21 | "playlistend": -1, 22 | "playliststart": 1, 23 | "prefer_free_formats": false, 24 | "quiet": false, 25 | "ratelimit": null, 26 | "rejecttitle": null, 27 | "retries": 10, 28 | "simulate": false, 29 | "subtitleslang": null, 30 | "subtitlesformat": "best", 31 | "test": true, 32 | "updatetime": true, 33 | "usenetrc": false, 34 | "username": null, 35 | "verbose": true, 36 | "writedescription": false, 37 | "writeinfojson": true, 38 | "writesubtitles": false, 39 | "allsubtitles": false, 40 | "listsubtitles": false, 41 | "socket_timeout": 20, 42 | "fixup": "never" 43 | } 44 | -------------------------------------------------------------------------------- /test/swftests/.gitignore: -------------------------------------------------------------------------------- 1 | *.swf 2 | -------------------------------------------------------------------------------- /test/swftests/ArrayAccess.as: -------------------------------------------------------------------------------- 1 | // input: [["a", "b", "c", "d"]] 2 | // output: ["c", "b", "a", "d"] 3 | 4 | package { 5 | public class ArrayAccess { 6 | public static function main(ar:Array):Array { 7 | var aa:ArrayAccess = new ArrayAccess(); 8 | return aa.f(ar, 2); 9 | } 10 | 11 | private function f(ar:Array, num:Number):Array{ 12 | var x:String = ar[0]; 13 | var y:String = ar[num % ar.length]; 14 | ar[0] = y; 15 | ar[num] = x; 16 | return ar; 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test/swftests/ClassCall.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 121 3 | 4 | package { 5 | public class ClassCall { 6 | public static function main():int{ 7 | var f:OtherClass = new OtherClass(); 8 | return f.func(100,20); 9 | } 10 | } 11 | } 12 | 13 | class OtherClass { 14 | public function func(x: int, y: int):int { 15 | return x+y+1; 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /test/swftests/ClassConstruction.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 0 3 | 4 | package { 5 | public class ClassConstruction { 6 | public static function main():int{ 7 | var f:Foo = new Foo(); 8 | return 0; 9 | } 10 | } 11 | } 12 | 13 | class Foo { 14 | 15 | } 16 | -------------------------------------------------------------------------------- /test/swftests/ConstArrayAccess.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 4 3 | 4 | package { 5 | public class 
ConstArrayAccess { 6 | private static const x:int = 2; 7 | private static const ar:Array = ["42", "3411"]; 8 | 9 | public static function main():int{ 10 | var c:ConstArrayAccess = new ConstArrayAccess(); 11 | return c.f(); 12 | } 13 | 14 | public function f(): int { 15 | return ar[1].length; 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /test/swftests/ConstantInt.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 2 3 | 4 | package { 5 | public class ConstantInt { 6 | private static const x:int = 2; 7 | 8 | public static function main():int{ 9 | return x; 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/swftests/DictCall.as: -------------------------------------------------------------------------------- 1 | // input: [{"x": 1, "y": 2}] 2 | // output: 3 3 | 4 | package { 5 | public class DictCall { 6 | public static function main(d:Object):int{ 7 | return d.x + d.y; 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/swftests/EqualsOperator.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: false 3 | 4 | package { 5 | public class EqualsOperator { 6 | public static function main():Boolean{ 7 | return 1 == 2; 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/swftests/LocalVars.as: -------------------------------------------------------------------------------- 1 | // input: [1, 2] 2 | // output: 3 3 | 4 | package { 5 | public class LocalVars { 6 | public static function main(a:int, b:int):int{ 7 | var c:int = a + b + b; 8 | var d:int = c - b; 9 | var e:int = d; 10 | return e; 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /test/swftests/MemberAssignment.as: -------------------------------------------------------------------------------- 1 | // input: [1] 2 | // output: 2 3 | 4 | package { 5 | public class MemberAssignment { 6 | public var v:int; 7 | 8 | public function g():int { 9 | return this.v; 10 | } 11 | 12 | public function f(a:int):int{ 13 | this.v = a; 14 | return this.v + this.g(); 15 | } 16 | 17 | public static function main(a:int): int { 18 | var v:MemberAssignment = new MemberAssignment(); 19 | return v.f(a); 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test/swftests/NeOperator.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 123 3 | 4 | package { 5 | public class NeOperator { 6 | public static function main(): int { 7 | var res:int = 0; 8 | if (1 != 2) { 9 | res += 3; 10 | } else { 11 | res += 4; 12 | } 13 | if (2 != 2) { 14 | res += 10; 15 | } else { 16 | res += 20; 17 | } 18 | if (9 == 9) { 19 | res += 100; 20 | } 21 | return res; 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /test/swftests/PrivateCall.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 9 3 | 4 | package { 5 | public class PrivateCall { 6 | public static function main():int{ 7 | var f:OtherClass = new OtherClass(); 8 | return f.func(); 9 | } 10 | } 11 | } 12 | 13 | class OtherClass { 14 | private function pf():int { 15 | return 
9; 16 | } 17 | 18 | public function func():int { 19 | return this.pf(); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /test/swftests/PrivateVoidCall.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 9 3 | 4 | package { 5 | public class PrivateVoidCall { 6 | public static function main():int{ 7 | var f:OtherClass = new OtherClass(); 8 | f.func(); 9 | return 9; 10 | } 11 | } 12 | } 13 | 14 | class OtherClass { 15 | private function pf():void { 16 | ; 17 | } 18 | 19 | public function func():void { 20 | this.pf(); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test/swftests/StaticAssignment.as: -------------------------------------------------------------------------------- 1 | // input: [1] 2 | // output: 1 3 | 4 | package { 5 | public class StaticAssignment { 6 | public static var v:int; 7 | 8 | public static function main(a:int):int{ 9 | v = a; 10 | return v; 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /test/swftests/StaticRetrieval.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 1 3 | 4 | package { 5 | public class StaticRetrieval { 6 | public static var v:int; 7 | 8 | public static function main():int{ 9 | if (v) { 10 | return 0; 11 | } else { 12 | return 1; 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /test/swftests/StringBasics.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 3 3 | 4 | package { 5 | public class StringBasics { 6 | public static function main():int{ 7 | var s:String = "abc"; 8 | return s.length; 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/swftests/StringCharCodeAt.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 9897 3 | 4 | package { 5 | public class StringCharCodeAt { 6 | public static function main():int{ 7 | var s:String = "abc"; 8 | return s.charCodeAt(1) * 100 + s.charCodeAt(); 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/swftests/StringConversion.as: -------------------------------------------------------------------------------- 1 | // input: [] 2 | // output: 2 3 | 4 | package { 5 | public class StringConversion { 6 | public static function main():int{ 7 | var s:String = String(99); 8 | return s.length; 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/test_age_restriction.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | # Allow direct execution 5 | import os 6 | import sys 7 | import unittest 8 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 9 | 10 | from test.helper import try_rm 11 | 12 | 13 | from youtube_dlc import YoutubeDL 14 | 15 | 16 | def _download_restricted(url, filename, age): 17 | """ Returns true if the file has been downloaded """ 18 | 19 | params = { 20 | 'age_limit': age, 21 | 'skip_download': True, 22 | 'writeinfojson': True, 23 | 'outtmpl': '%(id)s.%(ext)s', 24 | } 25 | ydl = YoutubeDL(params) 26 | 
ydl.add_default_info_extractors() 27 | json_filename = os.path.splitext(filename)[0] + '.info.json' 28 | try_rm(json_filename) 29 | ydl.download([url]) 30 | res = os.path.exists(json_filename) 31 | try_rm(json_filename) 32 | return res 33 | 34 | 35 | class TestAgeRestriction(unittest.TestCase): 36 | def _assert_restricted(self, url, filename, age, old_age=None): 37 | self.assertTrue(_download_restricted(url, filename, old_age)) 38 | self.assertFalse(_download_restricted(url, filename, age)) 39 | 40 | def test_youtube(self): 41 | self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10) 42 | 43 | def test_youporn(self): 44 | self._assert_restricted( 45 | 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', 46 | '505835.mp4', 2, old_age=25) 47 | 48 | 49 | if __name__ == '__main__': 50 | unittest.main() 51 | -------------------------------------------------------------------------------- /test/test_cache.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | from __future__ import unicode_literals 5 | 6 | import shutil 7 | 8 | # Allow direct execution 9 | import os 10 | import sys 11 | import unittest 12 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 13 | 14 | 15 | from test.helper import FakeYDL 16 | from youtube_dlc.cache import Cache 17 | 18 | 19 | def _is_empty(d): 20 | return not bool(os.listdir(d)) 21 | 22 | 23 | def _mkdir(d): 24 | if not os.path.exists(d): 25 | os.mkdir(d) 26 | 27 | 28 | class TestCache(unittest.TestCase): 29 | def setUp(self): 30 | TEST_DIR = os.path.dirname(os.path.abspath(__file__)) 31 | TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata') 32 | _mkdir(TESTDATA_DIR) 33 | self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test') 34 | self.tearDown() 35 | 36 | def tearDown(self): 37 | if os.path.exists(self.test_dir): 38 | shutil.rmtree(self.test_dir) 39 | 40 | def test_cache(self): 41 | ydl = FakeYDL({ 42 | 'cachedir': self.test_dir, 43 | }) 44 | c = Cache(ydl) 45 | obj = {'x': 1, 'y': ['ä', '\\a', True]} 46 | self.assertEqual(c.load('test_cache', 'k.'), None) 47 | c.store('test_cache', 'k.', obj) 48 | self.assertEqual(c.load('test_cache', 'k2'), None) 49 | self.assertFalse(_is_empty(self.test_dir)) 50 | self.assertEqual(c.load('test_cache', 'k.'), obj) 51 | self.assertEqual(c.load('test_cache', 'y'), None) 52 | self.assertEqual(c.load('test_cache2', 'k.'), None) 53 | c.remove() 54 | self.assertFalse(os.path.exists(self.test_dir)) 55 | self.assertEqual(c.load('test_cache', 'k.'), None) 56 | 57 | 58 | if __name__ == '__main__': 59 | unittest.main() 60 | -------------------------------------------------------------------------------- /test/test_execution.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | from __future__ import unicode_literals 5 | 6 | import unittest 7 | 8 | import sys 9 | import os 10 | import subprocess 11 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 12 | 13 | from youtube_dlc.utils import encodeArgument 14 | 15 | rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 16 | 17 | 18 | try: 19 | _DEV_NULL = subprocess.DEVNULL 20 | except AttributeError: 21 | _DEV_NULL = open(os.devnull, 'wb') 22 | 23 | 24 | class TestExecution(unittest.TestCase): 25 | def test_import(self): 26 | subprocess.check_call([sys.executable, '-c', 'import youtube_dlc'], cwd=rootDir) 27 | 28 | def 
test_module_exec(self): 29 | if sys.version_info >= (2, 7): # Python 2.6 doesn't support package execution 30 | subprocess.check_call([sys.executable, '-m', 'youtube_dlc', '--version'], cwd=rootDir, stdout=_DEV_NULL) 31 | 32 | def test_main_exec(self): 33 | subprocess.check_call([sys.executable, 'youtube_dlc/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL) 34 | 35 | def test_cmdline_umlauts(self): 36 | p = subprocess.Popen( 37 | [sys.executable, 'youtube_dlc/__main__.py', encodeArgument('ä'), '--version'], 38 | cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE) 39 | _, stderr = p.communicate() 40 | self.assertFalse(stderr) 41 | 42 | 43 | if __name__ == '__main__': 44 | unittest.main() 45 | -------------------------------------------------------------------------------- /test/test_iqiyi_sdk_interpreter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import unicode_literals 4 | 5 | # Allow direct execution 6 | import os 7 | import sys 8 | import unittest 9 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 10 | 11 | from test.helper import FakeYDL 12 | from youtube_dlc.extractor import IqiyiIE 13 | 14 | 15 | class IqiyiIEWithCredentials(IqiyiIE): 16 | def _get_login_info(self): 17 | return 'foo', 'bar' 18 | 19 | 20 | class WarningLogger(object): 21 | def __init__(self): 22 | self.messages = [] 23 | 24 | def warning(self, msg): 25 | self.messages.append(msg) 26 | 27 | def debug(self, msg): 28 | pass 29 | 30 | def error(self, msg): 31 | pass 32 | 33 | 34 | class TestIqiyiSDKInterpreter(unittest.TestCase): 35 | def test_iqiyi_sdk_interpreter(self): 36 | ''' 37 | Test the functionality of IqiyiSDKInterpreter by trying to log in 38 | 39 | If `sign` is incorrect, /validate call throws an HTTP 556 error 40 | ''' 41 | logger = WarningLogger() 42 | ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger})) 43 | ie._login() 44 | self.assertTrue('unable to log in:' in logger.messages[0]) 45 | 46 | 47 | if __name__ == '__main__': 48 | unittest.main() 49 | -------------------------------------------------------------------------------- /test/test_netrc.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import os 5 | import sys 6 | import unittest 7 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | 9 | 10 | from youtube_dlc.extractor import ( 11 | gen_extractors, 12 | ) 13 | 14 | 15 | class TestNetRc(unittest.TestCase): 16 | def test_netrc_present(self): 17 | for ie in gen_extractors(): 18 | if not hasattr(ie, '_login'): 19 | continue 20 | self.assertTrue( 21 | hasattr(ie, '_NETRC_MACHINE'), 22 | 'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME) 23 | 24 | 25 | if __name__ == '__main__': 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /test/test_options.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from __future__ import unicode_literals 4 | 5 | # Allow direct execution 6 | import os 7 | import sys 8 | import unittest 9 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 10 | 11 | from youtube_dlc.options import _hide_login_info 12 | 13 | 14 | class TestOptions(unittest.TestCase): 15 | def test_hide_login_info(self): 16 | 
self.assertEqual(_hide_login_info(['-u', 'foo', '-p', 'bar']), 17 | ['-u', 'PRIVATE', '-p', 'PRIVATE']) 18 | self.assertEqual(_hide_login_info(['-u']), ['-u']) 19 | self.assertEqual(_hide_login_info(['-u', 'foo', '-u', 'bar']), 20 | ['-u', 'PRIVATE', '-u', 'PRIVATE']) 21 | self.assertEqual(_hide_login_info(['--username=foo']), 22 | ['--username=PRIVATE']) 23 | 24 | 25 | if __name__ == '__main__': 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /test/test_postprocessors.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import unicode_literals 4 | 5 | # Allow direct execution 6 | import os 7 | import sys 8 | import unittest 9 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 10 | 11 | from youtube_dlc.postprocessor import MetadataFromTitlePP 12 | 13 | 14 | class TestMetadataFromTitle(unittest.TestCase): 15 | def test_format_to_regex(self): 16 | pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s') 17 | self.assertEqual(pp._titleregex, r'(?P.+)\ \-\ (?P<artist>.+)') 18 | -------------------------------------------------------------------------------- /test/test_update.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import unicode_literals 4 | 5 | # Allow direct execution 6 | import os 7 | import sys 8 | import unittest 9 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 10 | 11 | 12 | import json 13 | from youtube_dlc.update import rsa_verify 14 | 15 | 16 | class TestUpdate(unittest.TestCase): 17 | def test_rsa_verify(self): 18 | UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537) 19 | with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json'), 'rb') as f: 20 | versions_info = f.read().decode() 21 | versions_info = json.loads(versions_info) 22 | signature = versions_info['signature'] 23 | del versions_info['signature'] 24 | self.assertTrue(rsa_verify( 25 | json.dumps(versions_info, sort_keys=True).encode('utf-8'), 26 | signature, UPDATES_RSA_KEY)) 27 | 28 | 29 | if __name__ == '__main__': 30 | unittest.main() 31 | -------------------------------------------------------------------------------- /test/testdata/cookies/httponly_cookies.txt: -------------------------------------------------------------------------------- 1 | # Netscape HTTP Cookie File 2 | # http://curl.haxx.se/rfc/cookie_spec.html 3 | # This is a generated file! Do not edit. 4 | 5 | #HttpOnly_www.foobar.foobar FALSE / TRUE 2147483647 HTTPONLY_COOKIE HTTPONLY_COOKIE_VALUE 6 | www.foobar.foobar FALSE / TRUE 2147483647 JS_ACCESSIBLE_COOKIE JS_ACCESSIBLE_COOKIE_VALUE 7 | -------------------------------------------------------------------------------- /test/testdata/cookies/malformed_cookies.txt: -------------------------------------------------------------------------------- 1 | # Netscape HTTP Cookie File 2 | # http://curl.haxx.se/rfc/cookie_spec.html 3 | # This is a generated file! Do not edit. 
4 | 5 | # Cookie file entry with invalid number of fields - 6 instead of 7 6 | www.foobar.foobar FALSE / FALSE 0 COOKIE 7 | 8 | # Cookie file entry with invalid expires at 9 | www.foobar.foobar FALSE / FALSE 1.7976931348623157e+308 COOKIE VALUE 10 | -------------------------------------------------------------------------------- /test/testdata/cookies/session_cookies.txt: -------------------------------------------------------------------------------- 1 | # Netscape HTTP Cookie File 2 | # http://curl.haxx.se/rfc/cookie_spec.html 3 | # This is a generated file! Do not edit. 4 | 5 | www.foobar.foobar FALSE / TRUE YoutubeDLExpiresEmpty YoutubeDLExpiresEmptyValue 6 | www.foobar.foobar FALSE / TRUE 0 YoutubeDLExpires0 YoutubeDLExpires0Value 7 | -------------------------------------------------------------------------------- /test/testdata/f4m/custom_base_url.f4m: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <manifest xmlns="http://ns.adobe.com/f4m/1.0"> 3 | <streamType>recorded</streamType> 4 | <baseURL>http://vod.livestream.com/events/0000000000673980/</baseURL> 5 | <duration>269.293</duration> 6 | <bootstrapInfo profile="named" id="bootstrap_1">AAAAm2Fic3QAAAAAAAAAAQAAAAPoAAAAAAAEG+0AAAAAAAAAAAAAAAAAAQAAABlhc3J0AAAAAAAAAAABAAAAAQAAAC4BAAAAVmFmcnQAAAAAAAAD6AAAAAAEAAAAAQAAAAAAAAAAAAAXcAAAAC0AAAAAAAQHQAAAE5UAAAAuAAAAAAAEGtUAAAEYAAAAAAAAAAAAAAAAAAAAAAA=</bootstrapInfo> 7 | <media url="b90f532f-b0f6-4f4e-8289-706d490b2fd8_2292" bootstrapInfoId="bootstrap_1" bitrate="2148" width="1280" height="720" videoCodec="avc1.4d401f" audioCodec="mp4a.40.2"> 8 | <metadata>AgAKb25NZXRhRGF0YQgAAAAIAAhkdXJhdGlvbgBAcNSwIMSbpgAFd2lkdGgAQJQAAAAAAAAABmhlaWdodABAhoAAAAAAAAAJZnJhbWVyYXRlAEA4/7DoLwW3AA12aWRlb2RhdGFyYXRlAECe1DLgjcobAAx2aWRlb2NvZGVjaWQAQBwAAAAAAAAADWF1ZGlvZGF0YXJhdGUAQGSimlvaPKQADGF1ZGlvY29kZWNpZABAJAAAAAAAAAAACQ==</metadata> 9 | </media> 10 | </manifest> 11 | -------------------------------------------------------------------------------- /test/testdata/m3u8/teamcoco_11995.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-0",NAME="Default",AUTOSELECT=YES,DEFAULT=YES,URI="hls/CONAN_020217_Highlight_show-audio-160k_v4.m3u8" 3 | #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-1",NAME="Default",AUTOSELECT=YES,DEFAULT=YES,URI="hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8" 4 | #EXT-X-I-FRAME-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=37862000,CODECS="avc1.4d001f",URI="hls/CONAN_020217_Highlight_show-2m_iframe.m3u8" 5 | #EXT-X-I-FRAME-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=18750000,CODECS="avc1.4d001e",URI="hls/CONAN_020217_Highlight_show-1m_iframe.m3u8" 6 | #EXT-X-I-FRAME-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=6535000,CODECS="avc1.42001e",URI="hls/CONAN_020217_Highlight_show-400k_iframe.m3u8" 7 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=2374000,RESOLUTION=1024x576,CODECS="avc1.4d001f,mp4a.40.2",AUDIO="audio-0" 8 | hls/CONAN_020217_Highlight_show-2m_v4.m3u8 9 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=1205000,RESOLUTION=640x360,CODECS="avc1.4d001e,mp4a.40.2",AUDIO="audio-0" 10 | hls/CONAN_020217_Highlight_show-1m_v4.m3u8 11 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=522000,RESOLUTION=400x224,CODECS="avc1.42001e,mp4a.40.2",AUDIO="audio-0" 12 | hls/CONAN_020217_Highlight_show-400k_v4.m3u8 13 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=413000,RESOLUTION=400x224,CODECS="avc1.42001e,mp4a.40.5",AUDIO="audio-1" 14 | hls/CONAN_020217_Highlight_show-400k_v4.m3u8 15 | 
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=71000,CODECS="mp4a.40.5",AUDIO="audio-1" 16 | hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8 17 | -------------------------------------------------------------------------------- /test/testdata/m3u8/toggle_mobile_12211.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | #EXT-X-VERSION:4 3 | #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="eng",NAME="English",URI="http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_sa2ntrdg/name/a.mp4/index.m3u8" 4 | #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="und",NAME="Undefined",URI="http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_r7y0nitg/name/a.mp4/index.m3u8" 5 | 6 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=155648,RESOLUTION=320x180,AUDIO="audio" 7 | http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_qlk9hlzr/name/a.mp4/index.m3u8 8 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=502784,RESOLUTION=480x270,AUDIO="audio" 9 | http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_oefackmi/name/a.mp4/index.m3u8 10 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=827392,RESOLUTION=640x360,AUDIO="audio" 11 | http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/12/pv/1/flavorId/0_vyg9pj7k/name/a.mp4/index.m3u8 12 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=1396736,RESOLUTION=854x480,AUDIO="audio" 13 | http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/12/pv/1/flavorId/0_50n4psvx/name/a.mp4/index.m3u8 14 | -------------------------------------------------------------------------------- /test/testdata/m3u8/vidio.m3u8: -------------------------------------------------------------------------------- 1 | #EXTM3U 2 | 3 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=300000,RESOLUTION=480x270,NAME="270p 3G" 4 | https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b300.mp4.m3u8 5 | 6 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=600000,RESOLUTION=640x360,NAME="360p SD" 7 | https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b600.mp4.m3u8 8 | 9 | #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=1200000,RESOLUTION=1280x720,NAME="720p HD" 10 | https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b1200.mp4.m3u8 11 | -------------------------------------------------------------------------------- /test/testdata/mpd/float_duration.mpd: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <MPD xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="urn:mpeg:dash:schema:mpd:2011" type="static" minBufferTime="PT2S" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT6014S"> 3 | <Period bitstreamSwitching="true"> 4 | <AdaptationSet mimeType="audio/mp4" codecs="mp4a.40.2" startWithSAP="1" segmentAlignment="true"> 5 | <SegmentTemplate timescale="1000000" presentationTimeOffset="0" initialization="ai_$RepresentationID$.mp4d" media="a_$RepresentationID$_$Number$.mp4d" duration="2000000.0" startNumber="0"></SegmentTemplate> 6 | <Representation id="318597" bandwidth="61587"></Representation> 7 | </AdaptationSet> 8 | <AdaptationSet mimeType="video/mp4" startWithSAP="1" segmentAlignment="true"> 9 | <SegmentTemplate timescale="1000000" presentationTimeOffset="0" 
initialization="vi_$RepresentationID$.mp4d" media="v_$RepresentationID$_$Number$.mp4d" duration="2000000.0" startNumber="0"></SegmentTemplate> 10 | <Representation id="318597" codecs="avc1.42001f" width="340" height="192" bandwidth="318597"></Representation> 11 | <Representation id="638590" codecs="avc1.42001f" width="512" height="288" bandwidth="638590"></Representation> 12 | <Representation id="1022565" codecs="avc1.4d001f" width="688" height="384" bandwidth="1022565"></Representation> 13 | <Representation id="2046506" codecs="avc1.4d001f" width="1024" height="576" bandwidth="2046506"></Representation> 14 | <Representation id="3998017" codecs="avc1.640029" width="1280" height="720" bandwidth="3998017"></Representation> 15 | <Representation id="5997485" codecs="avc1.640032" width="1920" height="1080" bandwidth="5997485"></Representation> 16 | </AdaptationSet> 17 | </Period> 18 | </MPD> -------------------------------------------------------------------------------- /test/testdata/mpd/unfragmented.mpd: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8" standalone="yes"?> 2 | <MPD mediaPresentationDuration="PT54.915S" minBufferTime="PT1.500S" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" type="static" xmlns="urn:mpeg:dash:schema:mpd:2011"> 3 | <Period duration="PT54.915S"> 4 | <AdaptationSet segmentAlignment="true" subsegmentAlignment="true" subsegmentStartsWithSAP="1"> 5 | <Representation bandwidth="804261" codecs="avc1.4d401e" frameRate="30" height="360" id="VIDEO-1" mimeType="video/mp4" startWithSAP="1" width="360"> 6 | <BaseURL>DASH_360</BaseURL> 7 | <SegmentBase indexRange="915-1114" indexRangeExact="true"> 8 | <Initialization range="0-914"/> 9 | </SegmentBase> 10 | </Representation> 11 | <Representation bandwidth="608000" codecs="avc1.4d401e" frameRate="30" height="240" id="VIDEO-2" mimeType="video/mp4" startWithSAP="1" width="240"> 12 | <BaseURL>DASH_240</BaseURL> 13 | <SegmentBase indexRange="913-1112" indexRangeExact="true"> 14 | <Initialization range="0-912"/> 15 | </SegmentBase> 16 | </Representation> 17 | </AdaptationSet> 18 | <AdaptationSet> 19 | <Representation audioSamplingRate="48000" bandwidth="129870" codecs="mp4a.40.2" id="AUDIO-1" mimeType="audio/mp4" startWithSAP="1"> 20 | <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/> 21 | <BaseURL>audio</BaseURL> 22 | <SegmentBase indexRange="832-1007" indexRangeExact="true"> 23 | <Initialization range="0-831"/> 24 | </SegmentBase> 25 | </Representation> 26 | </AdaptationSet> 27 | </Period> 28 | </MPD> 29 | -------------------------------------------------------------------------------- /test/testdata/xspf/foo_xspf.xspf: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <playlist version="1" xmlns="http://xspf.org/ns/0/"> 3 | <date>2018-03-09T18:01:43Z</date> 4 | <trackList> 5 | <track> 6 | <location>cd1/track%201.mp3</location> 7 | <title>Pandemonium 8 | Foilverb 9 | Visit http://bigbrother404.bandcamp.com 10 | Pandemonium EP 11 | 1 12 | 202416 13 | 14 | 15 | ../%E3%83%88%E3%83%A9%E3%83%83%E3%82%AF%E3%80%80%EF%BC%92.mp3 16 | Final Cartridge (Nichico Twelve Remix) 17 | Visit http://bigbrother404.bandcamp.com 18 | Foilverb 19 | Pandemonium EP 20 | 2 21 | 255857 22 | 23 | 24 | track3.mp3 25 | https://example.com/track3.mp3 26 | Rebuilding Nightingale 27 | Visit http://bigbrother404.bandcamp.com 28 | Foilverb 29 | 
Pandemonium EP 30 | 3 31 | 287915 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /test/versions.json: -------------------------------------------------------------------------------- 1 | { 2 | "latest": "2013.01.06", 3 | "signature": "72158cdba391628569ffdbea259afbcf279bbe3d8aeb7492690735dc1cfa6afa754f55c61196f3871d429599ab22f2667f1fec98865527b32632e7f4b3675a7ef0f0fbe084d359256ae4bba68f0d33854e531a70754712f244be71d4b92e664302aa99653ee4df19800d955b6c4149cd2b3f24288d6e4b40b16126e01f4c8ce6", 4 | "versions": { 5 | "2013.01.02": { 6 | "bin": [ 7 | "http://youtube-dl.org/downloads/2013.01.02/youtube-dl", 8 | "f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b" 9 | ], 10 | "exe": [ 11 | "http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe", 12 | "75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422" 13 | ], 14 | "tar": [ 15 | "http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz", 16 | "6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196" 17 | ] 18 | }, 19 | "2013.01.06": { 20 | "bin": [ 21 | "http://youtube-dl.org/downloads/2013.01.06/youtube-dl", 22 | "64b6ed8865735c6302e836d4d832577321b4519aa02640dc508580c1ee824049" 23 | ], 24 | "exe": [ 25 | "http://youtube-dl.org/downloads/2013.01.06/youtube-dl.exe", 26 | "58609baf91e4389d36e3ba586e21dab882daaaee537e4448b1265392ae86ff84" 27 | ], 28 | "tar": [ 29 | "http://youtube-dl.org/downloads/2013.01.06/youtube-dl-2013.01.06.tar.gz", 30 | "fe77ab20a95d980ed17a659aa67e371fdd4d656d19c4c7950e7b720b0c2f1a86" 31 | ] 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py26,py27,py33,py34,py35 3 | [testenv] 4 | deps = 5 | nose 6 | coverage 7 | # We need a valid $HOME for test_compat_expanduser 8 | passenv = HOME 9 | defaultargs = test --exclude test_download.py --exclude test_age_restriction.py 10 | --exclude test_subtitles.py --exclude test_write_annotations.py 11 | --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py 12 | --exclude test_socks.py 13 | commands = nosetests --verbose {posargs:{[testenv]defaultargs}} # --with-coverage --cover-package=youtube_dlc --cover-html 14 | # test.test_download:TestDownload.test_NowVideo 15 | -------------------------------------------------------------------------------- /win/icon/cloud.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blackjack4494/yt-dlc/f9401f2a91987068139c5f757b12fc711d4c0cee/win/icon/cloud.ico -------------------------------------------------------------------------------- /win/ver.txt: -------------------------------------------------------------------------------- 1 | # UTF-8 2 | # 3 | # For more details about fixed file info 'ffi' see: 4 | # http://msdn.microsoft.com/en-us/library/ms646997.aspx 5 | VSVersionInfo( 6 | ffi=FixedFileInfo( 7 | # filevers and prodvers should be always a tuple with four items: (1, 2, 3, 4) 8 | # Set not needed items to zero 0. 9 | filevers=(16, 9, 2020, 0), 10 | prodvers=(16, 9, 2020, 0), 11 | # Contains a bitmask that specifies the valid bits 'flags'r 12 | mask=0x3f, 13 | # Contains a bitmask that specifies the Boolean attributes of the file. 14 | flags=0x0, 15 | # The operating system for which this file was designed. 16 | # 0x4 - NT and there is no need to change it. 
17 | # OS=0x40004, 18 | OS=0x4, 19 | # The general type of file. 20 | # 0x1 - the file is an application. 21 | fileType=0x1, 22 | # The function of the file. 23 | # 0x0 - the function is not defined for this fileType 24 | subtype=0x0, 25 | # Creation date and time stamp. 26 | date=(0, 0) 27 | ), 28 | kids=[ 29 | StringFileInfo( 30 | [ 31 | StringTable( 32 | u'040904B0', 33 | [StringStruct(u'Comments', u'Youtube-dlc Command Line Interface.'), 34 | StringStruct(u'CompanyName', u'theidel@uni-bremen.de'), 35 | StringStruct(u'FileDescription', u'Media Downloader'), 36 | StringStruct(u'FileVersion', u'16.9.2020.0'), 37 | StringStruct(u'InternalName', u'youtube-dlc'), 38 | StringStruct(u'LegalCopyright', u'theidel@uni-bremen.de | UNLICENSE'), 39 | StringStruct(u'OriginalFilename', u'youtube-dlc.exe'), 40 | StringStruct(u'ProductName', u'Youtube-dlc'), 41 | StringStruct(u'ProductVersion', u'16.9.2020.0 | git.io/JUGsM')]) 42 | ]), 43 | VarFileInfo([VarStruct(u'Translation', [0, 1200])]) 44 | ] 45 | ) 46 | -------------------------------------------------------------------------------- /youtube_dlc/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import unicode_literals 3 | 4 | # Execute with 5 | # $ python youtube_dlc/__main__.py (2.6+) 6 | # $ python -m youtube_dlc (2.7+) 7 | 8 | import sys 9 | 10 | if __package__ is None and not hasattr(sys, 'frozen'): 11 | # direct call of __main__.py 12 | import os.path 13 | path = os.path.realpath(os.path.abspath(__file__)) 14 | sys.path.insert(0, os.path.dirname(os.path.dirname(path))) 15 | 16 | import youtube_dlc 17 | 18 | if __name__ == '__main__': 19 | youtube_dlc.main() 20 | -------------------------------------------------------------------------------- /youtube_dlc/downloader/rtsp.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import os 4 | import subprocess 5 | 6 | from .common import FileDownloader 7 | from ..utils import ( 8 | check_executable, 9 | encodeFilename, 10 | ) 11 | 12 | 13 | class RtspFD(FileDownloader): 14 | def real_download(self, filename, info_dict): 15 | url = info_dict['url'] 16 | self.report_destination(filename) 17 | tmpfilename = self.temp_name(filename) 18 | 19 | if check_executable('mplayer', ['-h']): 20 | args = [ 21 | 'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', 22 | '-dumpstream', '-dumpfile', tmpfilename, url] 23 | elif check_executable('mpv', ['-h']): 24 | args = [ 25 | 'mpv', '-really-quiet', '--vo=null', '--stream-dump=' + tmpfilename, url] 26 | else: 27 | self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. 
Please install any.') 28 | return False 29 | 30 | self._debug_cmd(args) 31 | 32 | retval = subprocess.call(args) 33 | if retval == 0: 34 | fsize = os.path.getsize(encodeFilename(tmpfilename)) 35 | self.to_screen('\r[%s] %s bytes' % (args[0], fsize)) 36 | self.try_rename(tmpfilename, filename) 37 | self._hook_progress({ 38 | 'downloaded_bytes': fsize, 39 | 'total_bytes': fsize, 40 | 'filename': filename, 41 | 'status': 'finished', 42 | }) 43 | return True 44 | else: 45 | self.to_stderr('\n') 46 | self.report_error('%s exited with code %d' % (args[0], retval)) 47 | return False 48 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | try: 4 | from .lazy_extractors import * 5 | from .lazy_extractors import _ALL_CLASSES 6 | _LAZY_LOADER = True 7 | except ImportError: 8 | _LAZY_LOADER = False 9 | from .extractors import * 10 | 11 | _ALL_CLASSES = [ 12 | klass 13 | for name, klass in globals().items() 14 | if name.endswith('IE') and name != 'GenericIE' 15 | ] 16 | _ALL_CLASSES.append(GenericIE) 17 | 18 | 19 | def gen_extractor_classes(): 20 | """ Return a list of supported extractors. 21 | The order does matter; the first extractor matched is the one handling the URL. 22 | """ 23 | return _ALL_CLASSES 24 | 25 | 26 | def gen_extractors(): 27 | """ Return a list of an instance of every supported extractor. 28 | The order does matter; the first extractor matched is the one handling the URL. 29 | """ 30 | return [klass() for klass in gen_extractor_classes()] 31 | 32 | 33 | def list_extractors(age_limit): 34 | """ 35 | Return a list of extractors that are suitable for the given age, 36 | sorted by extractor ID. 37 | """ 38 | 39 | return sorted( 40 | filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()), 41 | key=lambda ie: ie.IE_NAME.lower()) 42 | 43 | 44 | def get_info_extractor(ie_name): 45 | """Returns the info extractor class with the given ie_name""" 46 | return globals()[ie_name + 'IE'] 47 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/academicearth.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import re 4 | 5 | from .common import InfoExtractor 6 | 7 | 8 | class AcademicEarthCourseIE(InfoExtractor): 9 | _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P[^?#/]+)' 10 | IE_NAME = 'AcademicEarth:Course' 11 | _TEST = { 12 | 'url': 'http://academicearth.org/playlists/laws-of-nature/', 13 | 'info_dict': { 14 | 'id': 'laws-of-nature', 15 | 'title': 'Laws of Nature', 16 | 'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.', 17 | }, 18 | 'playlist_count': 3, 19 | } 20 | 21 | def _real_extract(self, url): 22 | playlist_id = self._match_id(url) 23 | 24 | webpage = self._download_webpage(url, playlist_id) 25 | title = self._html_search_regex( 26 | r'

<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title') 27 | description = self._html_search_regex( 28 | r'<p class="excerpt"[^>]*?>(.*?)</p>', 29 | webpage, 'description', fatal=False) 30 | urls = re.findall( 31 | r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">
  • \s*?', 32 | webpage) 33 | entries = [self.url_result(u) for u in urls] 34 | 35 | return { 36 | '_type': 'playlist', 37 | 'id': playlist_id, 38 | 'title': title, 39 | 'description': description, 40 | 'entries': entries, 41 | } 42 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/adobeconnect.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..compat import ( 6 | compat_parse_qs, 7 | compat_urlparse, 8 | ) 9 | 10 | 11 | class AdobeConnectIE(InfoExtractor): 12 | _VALID_URL = r'https?://\w+\.adobeconnect\.com/(?P[\w-]+)' 13 | 14 | def _real_extract(self, url): 15 | video_id = self._match_id(url) 16 | webpage = self._download_webpage(url, video_id) 17 | title = self._html_search_regex(r'(.+?)', webpage, 'title') 18 | qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1]) 19 | is_live = qs.get('isLive', ['false'])[0] == 'true' 20 | formats = [] 21 | for con_string in qs['conStrings'][0].split(','): 22 | formats.append({ 23 | 'format_id': con_string.split('://')[0], 24 | 'app': compat_urlparse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]), 25 | 'ext': 'flv', 26 | 'play_path': 'mp4:' + qs['streamName'][0], 27 | 'rtmp_conn': 'S:' + qs['ticket'][0], 28 | 'rtmp_live': is_live, 29 | 'url': con_string, 30 | }) 31 | 32 | return { 33 | 'id': video_id, 34 | 'title': self._live_title(title) if is_live else title, 35 | 'formats': formats, 36 | 'is_live': is_live, 37 | } 38 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/aliexpress.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..compat import compat_str 6 | from ..utils import ( 7 | float_or_none, 8 | try_get, 9 | ) 10 | 11 | 12 | class AliExpressLiveIE(InfoExtractor): 13 | _VALID_URL = r'https?://live\.aliexpress\.com/live/(?P\d+)' 14 | _TEST = { 15 | 'url': 'https://live.aliexpress.com/live/2800002704436634', 16 | 'md5': 'e729e25d47c5e557f2630eaf99b740a5', 17 | 'info_dict': { 18 | 'id': '2800002704436634', 19 | 'ext': 'mp4', 20 | 'title': 'CASIMA7.22', 21 | 'thumbnail': r're:http://.*\.jpg', 22 | 'uploader': 'CASIMA Official Store', 23 | 'timestamp': 1500717600, 24 | 'upload_date': '20170722', 25 | }, 26 | } 27 | 28 | def _real_extract(self, url): 29 | video_id = self._match_id(url) 30 | 31 | webpage = self._download_webpage(url, video_id) 32 | 33 | data = self._parse_json( 34 | self._search_regex( 35 | r'(?s)runParams\s*=\s*({.+?})\s*;?\s*var', 36 | webpage, 'runParams'), 37 | video_id) 38 | 39 | title = data['title'] 40 | 41 | formats = self._extract_m3u8_formats( 42 | data['replyStreamUrl'], video_id, 'mp4', 43 | entry_protocol='m3u8_native', m3u8_id='hls') 44 | 45 | return { 46 | 'id': video_id, 47 | 'title': title, 48 | 'thumbnail': data.get('coverUrl'), 49 | 'uploader': try_get( 50 | data, lambda x: x['followBar']['name'], compat_str), 51 | 'timestamp': float_or_none(data.get('startTimeLong'), scale=1000), 52 | 'formats': formats, 53 | } 54 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/aljazeera.py: -------------------------------------------------------------------------------- 1 | from __future__ 
import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class AlJazeeraIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?:programmes|video)/.*?/(?P[^/]+)\.html' 8 | 9 | _TESTS = [{ 10 | 'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html', 11 | 'info_dict': { 12 | 'id': '3792260579001', 13 | 'ext': 'mp4', 14 | 'title': 'The Slum - Episode 1: Deliverance', 15 | 'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.', 16 | 'uploader_id': '665003303001', 17 | 'timestamp': 1411116829, 18 | 'upload_date': '20140919', 19 | }, 20 | 'add_ie': ['BrightcoveNew'], 21 | 'skip': 'Not accessible from Travis CI server', 22 | }, { 23 | 'url': 'http://www.aljazeera.com/video/news/2017/05/sierra-leone-709-carat-diamond-auctioned-170511100111930.html', 24 | 'only_matching': True, 25 | }] 26 | BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/665003303001/default_default/index.html?videoId=%s' 27 | 28 | def _real_extract(self, url): 29 | program_name = self._match_id(url) 30 | webpage = self._download_webpage(url, program_name) 31 | brightcove_id = self._search_regex( 32 | r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id') 33 | return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id) 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/behindkink.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .common import InfoExtractor 7 | from ..utils import url_basename 8 | 9 | 10 | class BehindKinkIE(InfoExtractor): 11 | _VALID_URL = r'https?://(?:www\.)?behindkink\.com/(?P[0-9]{4})/(?P[0-9]{2})/(?P[0-9]{2})/(?P[^/#?_]+)' 12 | _TEST = { 13 | 'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/', 14 | 'md5': '507b57d8fdcd75a41a9a7bdb7989c762', 15 | 'info_dict': { 16 | 'id': '37127', 17 | 'ext': 'mp4', 18 | 'title': 'What are you passionate about – Marley Blaze', 19 | 'description': 'md5:aee8e9611b4ff70186f752975d9b94b4', 20 | 'upload_date': '20141205', 21 | 'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/12/blaze-1.jpg', 22 | 'age_limit': 18, 23 | } 24 | } 25 | 26 | def _real_extract(self, url): 27 | mobj = re.match(self._VALID_URL, url) 28 | display_id = mobj.group('id') 29 | 30 | webpage = self._download_webpage(url, display_id) 31 | 32 | video_url = self._search_regex( 33 | r'[\w-]+)-online' 13 | _TEST = { 14 | 'url': 'https://player.bfi.org.uk/free/film/watch-computer-doctor-1974-online', 15 | 'md5': 'e8783ebd8e061ec4bc6e9501ed547de8', 16 | 'info_dict': { 17 | 'id': 'htNnhlZjE60C9VySkQEIBtU-cNV1Xx63', 18 | 'ext': 'mp4', 19 | 'title': 'Computer Doctor', 20 | 'description': 'md5:fb6c240d40c4dbe40428bdd62f78203b', 21 | }, 22 | 'skip': 'BFI Player films cannot be played outside of the UK', 23 | } 24 | 25 | def _real_extract(self, url): 26 | video_id = self._match_id(url) 27 | webpage = self._download_webpage(url, video_id) 28 | entries = [] 29 | for player_el in re.findall(r'(?s)<[^>]+class="player"[^>]*>', webpage): 30 | player_attr = extract_attributes(player_el) 31 | ooyala_id = player_attr.get('data-video-id') 32 | if not ooyala_id: 33 | continue 34 | entries.append(self.url_result( 35 | 'ooyala:' + ooyala_id, 'Ooyala', 36 | ooyala_id, 
player_attr.get('data-label'))) 37 | return self.playlist_result(entries) 38 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/bild.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import ( 6 | int_or_none, 7 | unescapeHTML, 8 | ) 9 | 10 | 11 | class BildIE(InfoExtractor): 12 | _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P[^/]+)-(?P\d+)(?:,auto=true)?\.bild\.html' 13 | IE_DESC = 'Bild.de' 14 | _TEST = { 15 | 'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html', 16 | 'md5': 'dd495cbd99f2413502a1713a1156ac8a', 17 | 'info_dict': { 18 | 'id': '38184146', 19 | 'ext': 'mp4', 20 | 'title': 'Das können die neuen iPads', 21 | 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f', 22 | 'thumbnail': r're:^https?://.*\.jpg$', 23 | 'duration': 196, 24 | } 25 | } 26 | 27 | def _real_extract(self, url): 28 | video_id = self._match_id(url) 29 | 30 | video_data = self._download_json( 31 | url.split('.bild.html')[0] + ',view=json.bild.html', video_id) 32 | 33 | return { 34 | 'id': video_id, 35 | 'title': unescapeHTML(video_data['title']).strip(), 36 | 'description': unescapeHTML(video_data.get('description')), 37 | 'url': video_data['clipList'][0]['srces'][0]['src'], 38 | 'thumbnail': video_data.get('poster'), 39 | 'duration': int_or_none(video_data.get('durationSec')), 40 | } 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/cbssports.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .cbs import CBSBaseIE 4 | 5 | 6 | class CBSSportsIE(CBSBaseIE): 7 | _VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/(?:video|news)/(?P[^/?#&]+)' 8 | 9 | _TESTS = [{ 10 | 'url': 'https://www.cbssports.com/nba/video/donovan-mitchell-flashes-star-potential-in-game-2-victory-over-thunder/', 11 | 'info_dict': { 12 | 'id': '1214315075735', 13 | 'ext': 'mp4', 14 | 'title': 'Donovan Mitchell flashes star potential in Game 2 victory over Thunder', 15 | 'description': 'md5:df6f48622612c2d6bd2e295ddef58def', 16 | 'timestamp': 1524111457, 17 | 'upload_date': '20180419', 18 | 'uploader': 'CBSI-NEW', 19 | }, 20 | 'params': { 21 | # m3u8 download 22 | 'skip_download': True, 23 | } 24 | }, { 25 | 'url': 'https://www.cbssports.com/nba/news/nba-playoffs-2018-watch-76ers-vs-heat-game-3-series-schedule-tv-channel-online-stream/', 26 | 'only_matching': True, 27 | }] 28 | 29 | def _extract_video_info(self, filter_query, video_id): 30 | return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id) 31 | 32 | def _real_extract(self, url): 33 | display_id = self._match_id(url) 34 | webpage = self._download_webpage(url, display_id) 35 | video_id = self._search_regex( 36 | [r'(?:=|%26)pcid%3D(\d+)', r'embedVideo(?:Container)?_(\d+)'], 37 | webpage, 'video id') 38 | return self._extract_video_info('byId=%s' % video_id, video_id) 39 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/cinemax.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .hbo import HBOBaseIE 7 | 8 | 9 | class CinemaxIE(HBOBaseIE): 10 | _VALID_URL = 
r'https?://(?:www\.)?cinemax\.com/(?P[^/]+/video/[0-9a-z-]+-(?P\d+))' 11 | _TESTS = [{ 12 | 'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903', 13 | 'md5': '82e0734bba8aa7ef526c9dd00cf35a05', 14 | 'info_dict': { 15 | 'id': '20126903', 16 | 'ext': 'mp4', 17 | 'title': 'S1 Ep 1: Recap', 18 | }, 19 | 'expected_warnings': ['Unknown MIME type application/mp4 in DASH manifest'], 20 | }, { 21 | 'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903.embed', 22 | 'only_matching': True, 23 | }] 24 | 25 | def _real_extract(self, url): 26 | path, video_id = re.match(self._VALID_URL, url).groups() 27 | info = self._extract_info('https://www.cinemax.com/%s.xml' % path, video_id) 28 | info['id'] = video_id 29 | return info 30 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/cliprs.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .onet import OnetBaseIE 5 | 6 | 7 | class ClipRsIE(OnetBaseIE): 8 | _VALID_URL = r'https?://(?:www\.)?clip\.rs/(?P[^/]+)/\d+' 9 | _TEST = { 10 | 'url': 'http://www.clip.rs/premijera-frajle-predstavljaju-novi-spot-za-pesmu-moli-me-moli/3732', 11 | 'md5': 'c412d57815ba07b56f9edc7b5d6a14e5', 12 | 'info_dict': { 13 | 'id': '1488842.1399140381', 14 | 'ext': 'mp4', 15 | 'title': 'PREMIJERA Frajle predstavljaju novi spot za pesmu Moli me, moli', 16 | 'description': 'md5:56ce2c3b4ab31c5a2e0b17cb9a453026', 17 | 'duration': 229, 18 | 'timestamp': 1459850243, 19 | 'upload_date': '20160405', 20 | } 21 | } 22 | 23 | def _real_extract(self, url): 24 | display_id = self._match_id(url) 25 | 26 | webpage = self._download_webpage(url, display_id) 27 | 28 | mvp_id = self._search_mvp_id(webpage) 29 | 30 | info_dict = self._extract_from_id(mvp_id, webpage) 31 | info_dict['display_id'] = display_id 32 | 33 | return info_dict 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/commonmistakes.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import sys 4 | 5 | from .common import InfoExtractor 6 | from ..utils import ExtractorError 7 | 8 | 9 | class CommonMistakesIE(InfoExtractor): 10 | IE_DESC = False # Do not list 11 | _VALID_URL = r'''(?x) 12 | (?:url|URL)$ 13 | ''' 14 | 15 | _TESTS = [{ 16 | 'url': 'url', 17 | 'only_matching': True, 18 | }, { 19 | 'url': 'URL', 20 | 'only_matching': True, 21 | }] 22 | 23 | def _real_extract(self, url): 24 | msg = ( 25 | 'You\'ve asked youtube-dlc to download the URL "%s". ' 26 | 'That doesn\'t make any sense. ' 27 | 'Simply remove the parameter in your command or configuration.' 28 | ) % url 29 | if not self._downloader.params.get('verbose'): 30 | msg += ' Add -v to the command line to see what arguments and configuration youtube-dlc got.' 
31 | raise ExtractorError(msg, expected=True) 32 | 33 | 34 | class UnicodeBOMIE(InfoExtractor): 35 | IE_DESC = False 36 | _VALID_URL = r'(?P\ufeff)(?P.*)$' 37 | 38 | # Disable test for python 3.2 since BOM is broken in re in this version 39 | # (see https://github.com/ytdl-org/youtube-dl/issues/9751) 40 | _TESTS = [] if (3, 0) < sys.version_info <= (3, 3) else [{ 41 | 'url': '\ufeffhttp://www.youtube.com/watch?v=BaW_jenozKc', 42 | 'only_matching': True, 43 | }] 44 | 45 | def _real_extract(self, url): 46 | real_url = self._match_id(url) 47 | self.report_warning( 48 | 'Your URL starts with a Byte Order Mark (BOM). ' 49 | 'Removing the BOM and looking for "%s" ...' % real_url) 50 | return self.url_result(real_url) 51 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/defense.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class DefenseGouvFrIE(InfoExtractor): 7 | IE_NAME = 'defense.gouv.fr' 8 | _VALID_URL = r'https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P[^/?#]*)' 9 | 10 | _TEST = { 11 | 'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1', 12 | 'md5': '75bba6124da7e63d2d60b5244ec9430c', 13 | 'info_dict': { 14 | 'id': '11213', 15 | 'ext': 'mp4', 16 | 'title': 'attaque-chimique-syrienne-du-21-aout-2013-1' 17 | } 18 | } 19 | 20 | def _real_extract(self, url): 21 | title = self._match_id(url) 22 | webpage = self._download_webpage(url, title) 23 | 24 | video_id = self._search_regex( 25 | r"flashvars.pvg_id=\"(\d+)\";", 26 | webpage, 'ID') 27 | 28 | json_url = ( 29 | 'http://static.videos.gouv.fr/brightcovehub/export/json/%s' % 30 | video_id) 31 | info = self._download_json(json_url, title, 'Downloading JSON config') 32 | video_url = info['renditions'][0]['url'] 33 | 34 | return { 35 | 'id': video_id, 36 | 'ext': 'mp4', 37 | 'url': video_url, 38 | 'title': title, 39 | } 40 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/discoverynetworks.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .dplay import DPlayIE 7 | 8 | 9 | class DiscoveryNetworksDeIE(DPlayIE): 10 | _VALID_URL = r'https?://(?:www\.)?(?P(?:tlc|dmax)\.de|dplay\.co\.uk)/(?:programme|show|sendungen)/(?P[^/]+)/(?:video/)?(?P[^/]+)' 11 | 12 | _TESTS = [{ 13 | 'url': 'https://www.tlc.de/programme/breaking-amish/video/die-welt-da-drauen/DCB331270001100', 14 | 'info_dict': { 15 | 'id': '78867', 16 | 'ext': 'mp4', 17 | 'title': 'Die Welt da draußen', 18 | 'description': 'md5:61033c12b73286e409d99a41742ef608', 19 | 'timestamp': 1554069600, 20 | 'upload_date': '20190331', 21 | }, 22 | 'params': { 23 | 'format': 'bestvideo', 24 | 'skip_download': True, 25 | }, 26 | }, { 27 | 'url': 'https://www.dmax.de/programme/dmax-highlights/video/tuning-star-sidney-hoffmann-exklusiv-bei-dmax/191023082312316', 28 | 'only_matching': True, 29 | }, { 30 | 'url': 'https://www.dplay.co.uk/show/ghost-adventures/video/hotel-leger-103620/EHD_280313B', 31 | 'only_matching': True, 32 | }, { 33 | 'url': 'https://tlc.de/sendungen/breaking-amish/die-welt-da-drauen/', 34 | 'only_matching': True, 35 | }] 36 | 37 | def _real_extract(self, url): 38 | domain, programme, alternate_id = 
re.match(self._VALID_URL, url).groups() 39 | country = 'GB' if domain == 'dplay.co.uk' else 'DE' 40 | realm = 'questuk' if country == 'GB' else domain.replace('.', '') 41 | return self._get_disco_api_info( 42 | url, '%s/%s' % (programme, alternate_id), 43 | 'sonic-eu1-prod.disco-api.com', realm, country) 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/dropbox.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import os.path 5 | import re 6 | 7 | from .common import InfoExtractor 8 | from ..compat import compat_urllib_parse_unquote 9 | from ..utils import url_basename 10 | 11 | 12 | class DropboxIE(InfoExtractor): 13 | _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P[a-zA-Z0-9]{15})/.*' 14 | _TESTS = [ 15 | { 16 | 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dlc%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', 17 | 'info_dict': { 18 | 'id': 'nelirfsxnmcfbfh', 19 | 'ext': 'mp4', 20 | 'title': 'youtube-dlc test video \'ä"BaW_jenozKc' 21 | } 22 | }, { 23 | 'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v', 24 | 'only_matching': True, 25 | }, 26 | ] 27 | 28 | def _real_extract(self, url): 29 | mobj = re.match(self._VALID_URL, url) 30 | video_id = mobj.group('id') 31 | fn = compat_urllib_parse_unquote(url_basename(url)) 32 | title = os.path.splitext(fn)[0] 33 | video_url = re.sub(r'[?&]dl=0', '', url) 34 | video_url += ('?' if '?' not in video_url else '&') + 'dl=1' 35 | 36 | return { 37 | 'id': video_id, 38 | 'title': title, 39 | 'url': video_url, 40 | } 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/ebaumsworld.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class EbaumsWorldIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?ebaumsworld\.com/videos/[^/]+/(?P\d+)' 8 | 9 | _TEST = { 10 | 'url': 'http://www.ebaumsworld.com/videos/a-giant-python-opens-the-door/83367677/', 11 | 'info_dict': { 12 | 'id': '83367677', 13 | 'ext': 'mp4', 14 | 'title': 'A Giant Python Opens The Door', 15 | 'description': 'This is how nightmares start...', 16 | 'uploader': 'jihadpizza', 17 | }, 18 | } 19 | 20 | def _real_extract(self, url): 21 | video_id = self._match_id(url) 22 | config = self._download_xml( 23 | 'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id) 24 | video_url = config.find('file').text 25 | 26 | return { 27 | 'id': video_id, 28 | 'title': config.find('title').text, 29 | 'url': video_url, 30 | 'description': config.find('description').text, 31 | 'thumbnail': config.find('image').text, 32 | 'uploader': config.find('username').text, 33 | } 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/echomsk.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .common import InfoExtractor 7 | 8 | 9 | class EchoMskIE(InfoExtractor): 10 | _VALID_URL = r'https?://(?:www\.)?echo\.msk\.ru/sounds/(?P\d+)' 11 | _TEST = { 12 | 'url': 'http://www.echo.msk.ru/sounds/1464134.html', 13 | 'md5': '2e44b3b78daff5b458e4dbc37f191f7c', 14 | 'info_dict': { 15 | 'id': 
'1464134', 16 | 'ext': 'mp3', 17 | 'title': 'Особое мнение - 29 декабря 2014, 19:08', 18 | }, 19 | } 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | 24 | webpage = self._download_webpage(url, video_id) 25 | 26 | audio_url = self._search_regex( 27 | r'', webpage, 'audio URL') 28 | 29 | title = self._html_search_regex( 30 | r'([^<]+)', 31 | webpage, 'title') 32 | 33 | air_date = self._html_search_regex( 34 | r'(?s)
<div class="date">(.+?)</div>
    ', 35 | webpage, 'date', fatal=False, default=None) 36 | 37 | if air_date: 38 | air_date = re.sub(r'(\s)\1+', r'\1', air_date) 39 | if air_date: 40 | title = '%s - %s' % (title, air_date) 41 | 42 | return { 43 | 'id': video_id, 44 | 'url': audio_url, 45 | 'title': title, 46 | } 47 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/ehow.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..compat import compat_urllib_parse_unquote 5 | 6 | 7 | class EHowIE(InfoExtractor): 8 | IE_NAME = 'eHow' 9 | _VALID_URL = r'https?://(?:www\.)?ehow\.com/[^/_?]*_(?P[0-9]+)' 10 | _TEST = { 11 | 'url': 'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html', 12 | 'md5': '9809b4e3f115ae2088440bcb4efbf371', 13 | 'info_dict': { 14 | 'id': '12245069', 15 | 'ext': 'flv', 16 | 'title': 'Hardwood Flooring Basics', 17 | 'description': 'Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...', 18 | 'uploader': 'Erick Nathan', 19 | } 20 | } 21 | 22 | def _real_extract(self, url): 23 | video_id = self._match_id(url) 24 | webpage = self._download_webpage(url, video_id) 25 | video_url = self._search_regex( 26 | r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL') 27 | final_url = compat_urllib_parse_unquote(video_url) 28 | uploader = self._html_search_meta('uploader', webpage) 29 | title = self._og_search_title(webpage).replace(' | eHow', '') 30 | 31 | return { 32 | 'id': video_id, 33 | 'url': final_url, 34 | 'title': title, 35 | 'thumbnail': self._og_search_thumbnail(webpage), 36 | 'description': self._og_search_description(webpage), 37 | 'uploader': uploader, 38 | } 39 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/embedly.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..compat import compat_urllib_parse_unquote 6 | 7 | 8 | class EmbedlyIE(InfoExtractor): 9 | _VALID_URL = r'https?://(?:www|cdn\.)?embedly\.com/widgets/media\.html\?(?:[^#]*?&)?url=(?P[^#&]+)' 10 | _TESTS = [{ 11 | 'url': 'https://cdn.embedly.com/widgets/media.html?src=http%3A%2F%2Fwww.youtube.com%2Fembed%2Fvideoseries%3Flist%3DUUGLim4T2loE5rwCMdpCIPVg&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DSU4fj_aEMVw%26list%3DUUGLim4T2loE5rwCMdpCIPVg&image=http%3A%2F%2Fi.ytimg.com%2Fvi%2FSU4fj_aEMVw%2Fhqdefault.jpg&key=8ee8a2e6a8cc47aab1a5ee67f9a178e0&type=text%2Fhtml&schema=youtube&autoplay=1', 12 | 'only_matching': True, 13 | }] 14 | 15 | def _real_extract(self, url): 16 | return self.url_result(compat_urllib_parse_unquote(self._match_id(url))) 17 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/engadget.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class EngadgetIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?engadget\.com/video/(?P[^/?#]+)' 8 | 9 | _TESTS = [{ 10 | # video with 5min ID 11 | 'url': 'http://www.engadget.com/video/518153925/', 12 | 'md5': 'c6820d4828a5064447a4d9fc73f312c9', 13 | 'info_dict': { 
14 | 'id': '518153925', 15 | 'ext': 'mp4', 16 | 'title': 'Samsung Galaxy Tab Pro 8.4 Review', 17 | }, 18 | 'add_ie': ['FiveMin'], 19 | }, { 20 | # video with vidible ID 21 | 'url': 'https://www.engadget.com/video/57a28462134aa15a39f0421a/', 22 | 'only_matching': True, 23 | }] 24 | 25 | def _real_extract(self, url): 26 | video_id = self._match_id(url) 27 | return self.url_result('aol-video:%s' % video_id) 28 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/filmweb.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import re 4 | 5 | from .common import InfoExtractor 6 | 7 | 8 | class FilmwebIE(InfoExtractor): 9 | _VALID_URL = r'https?://(?:www\.)?filmweb\.no/(?Ptrailere|filmnytt)/article(?P\d+)\.ece' 10 | _TEST = { 11 | 'url': 'http://www.filmweb.no/trailere/article1264921.ece', 12 | 'md5': 'e353f47df98e557d67edaceda9dece89', 13 | 'info_dict': { 14 | 'id': '13033574', 15 | 'ext': 'mp4', 16 | 'title': 'Det som en gang var', 17 | 'upload_date': '20160316', 18 | 'timestamp': 1458140101, 19 | 'uploader_id': '12639966', 20 | 'uploader': 'Live Roaldset', 21 | } 22 | } 23 | 24 | def _real_extract(self, url): 25 | article_type, article_id = re.match(self._VALID_URL, url).groups() 26 | if article_type == 'filmnytt': 27 | webpage = self._download_webpage(url, article_id) 28 | article_id = self._search_regex(r'data-videoid="(\d+)"', webpage, 'article id') 29 | embed_code = self._download_json( 30 | 'https://www.filmweb.no/template_v2/ajax/json_trailerEmbed.jsp', 31 | article_id, query={ 32 | 'articleId': article_id, 33 | })['embedCode'] 34 | iframe_url = self._proto_relative_url(self._search_regex( 35 | r']+src="([^"]+)', embed_code, 'iframe url')) 36 | 37 | return { 38 | '_type': 'url_transparent', 39 | 'id': article_id, 40 | 'url': iframe_url, 41 | 'ie_key': 'TwentyThreeVideo', 42 | } 43 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/formula1.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class Formula1IE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?formula1\.com/(?:content/fom-website/)?en/video/\d{4}/\d{1,2}/(?P.+?)\.html' 9 | _TESTS = [{ 10 | 'url': 'http://www.formula1.com/content/fom-website/en/video/2016/5/Race_highlights_-_Spain_2016.html', 11 | 'md5': '8c79e54be72078b26b89e0e111c0502b', 12 | 'info_dict': { 13 | 'id': 'JvYXJpMzE6pArfHWm5ARp5AiUmD-gibV', 14 | 'ext': 'mp4', 15 | 'title': 'Race highlights - Spain 2016', 16 | }, 17 | 'params': { 18 | # m3u8 download 19 | 'skip_download': True, 20 | }, 21 | 'add_ie': ['Ooyala'], 22 | }, { 23 | 'url': 'http://www.formula1.com/en/video/2016/5/Race_highlights_-_Spain_2016.html', 24 | 'only_matching': True, 25 | }] 26 | 27 | def _real_extract(self, url): 28 | display_id = self._match_id(url) 29 | webpage = self._download_webpage(url, display_id) 30 | ooyala_embed_code = self._search_regex( 31 | r'data-videoid="([^"]+)"', webpage, 'ooyala embed code') 32 | return self.url_result( 33 | 'ooyala:%s' % ooyala_embed_code, 'Ooyala', ooyala_embed_code) 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/fox9.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import 
unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class FOX9IE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?fox9\.com/video/(?P\d+)' 9 | 10 | def _real_extract(self, url): 11 | video_id = self._match_id(url) 12 | return self.url_result( 13 | 'anvato:anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b:' + video_id, 14 | 'Anvato', video_id) 15 | 16 | 17 | class FOX9NewsIE(InfoExtractor): 18 | _VALID_URL = r'https?://(?:www\.)?fox9\.com/news/(?P[^/?&#]+)' 19 | _TEST = { 20 | 'url': 'https://www.fox9.com/news/black-bear-in-tree-draws-crowd-in-downtown-duluth-minnesota', 21 | 'md5': 'd6e1b2572c3bab8a849c9103615dd243', 22 | 'info_dict': { 23 | 'id': '314473', 24 | 'ext': 'mp4', 25 | 'title': 'Bear climbs tree in downtown Duluth', 26 | 'description': 'md5:6a36bfb5073a411758a752455408ac90', 27 | 'duration': 51, 28 | 'timestamp': 1478123580, 29 | 'upload_date': '20161102', 30 | 'uploader': 'EPFOX', 31 | 'categories': ['News', 'Sports'], 32 | 'tags': ['news', 'video'], 33 | }, 34 | } 35 | 36 | def _real_extract(self, url): 37 | display_id = self._match_id(url) 38 | webpage = self._download_webpage(url, display_id) 39 | anvato_id = self._search_regex( 40 | r'anvatoId\s*:\s*[\'"](\d+)', webpage, 'anvato id') 41 | return self.url_result('https://www.fox9.com/video/' + anvato_id, 'FOX9') 42 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/foxsports.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class FoxSportsIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*video/(?P\d+)' 8 | 9 | _TEST = { 10 | 'url': 'http://www.foxsports.com/tennessee/video/432609859715', 11 | 'md5': 'b49050e955bebe32c301972e4012ac17', 12 | 'info_dict': { 13 | 'id': '432609859715', 14 | 'ext': 'mp4', 15 | 'title': 'Courtney Lee on going up 2-0 in series vs. 
Blazers', 16 | 'description': 'Courtney Lee talks about Memphis being focused.', 17 | # TODO: fix timestamp 18 | 'upload_date': '19700101', # '20150423', 19 | # 'timestamp': 1429761109, 20 | 'uploader': 'NEWA-FNG-FOXSPORTS', 21 | }, 22 | 'params': { 23 | # m3u8 download 24 | 'skip_download': True, 25 | }, 26 | 'add_ie': ['ThePlatform'], 27 | } 28 | 29 | def _real_extract(self, url): 30 | video_id = self._match_id(url) 31 | 32 | return self.url_result( 33 | 'https://feed.theplatform.com/f/BKQ29B/foxsports-all?byId=' + video_id, 'ThePlatformFeed') 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/freespeech.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from .youtube import YoutubeIE 5 | 6 | 7 | class FreespeechIE(InfoExtractor): 8 | IE_NAME = 'freespeech.org' 9 | _VALID_URL = r'https?://(?:www\.)?freespeech\.org/stories/(?P.+)' 10 | _TEST = { 11 | 'add_ie': ['Youtube'], 12 | 'url': 'http://www.freespeech.org/stories/fcc-announces-net-neutrality-rollback-whats-stake/', 13 | 'info_dict': { 14 | 'id': 'waRk6IPqyWM', 15 | 'ext': 'mp4', 16 | 'title': 'What\'s At Stake - Net Neutrality Special', 17 | 'description': 'Presented by MNN and FSTV', 18 | 'upload_date': '20170728', 19 | 'uploader_id': 'freespeechtv', 20 | 'uploader': 'freespeechtv', 21 | }, 22 | } 23 | 24 | def _real_extract(self, url): 25 | display_id = self._match_id(url) 26 | webpage = self._download_webpage(url, display_id) 27 | youtube_url = self._search_regex( 28 | r'data-video-url="([^"]+)"', 29 | webpage, 'youtube url') 30 | 31 | return self.url_result(youtube_url, YoutubeIE.ie_key()) 32 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/gigya.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | from ..utils import ( 6 | ExtractorError, 7 | urlencode_postdata, 8 | ) 9 | 10 | 11 | class GigyaBaseIE(InfoExtractor): 12 | def _gigya_login(self, auth_data): 13 | auth_info = self._download_json( 14 | 'https://accounts.eu1.gigya.com/accounts.login', None, 15 | note='Logging in', errnote='Unable to log in', 16 | data=urlencode_postdata(auth_data)) 17 | 18 | error_message = auth_info.get('errorDetails') or auth_info.get('errorMessage') 19 | if error_message: 20 | raise ExtractorError( 21 | 'Unable to login: %s' % error_message, expected=True) 22 | return auth_info 23 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/glide.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class GlideIE(InfoExtractor): 8 | IE_DESC = 'Glide mobile video messages (glide.me)' 9 | _VALID_URL = r'https?://share\.glide\.me/(?P[A-Za-z0-9\-=_+]+)' 10 | _TEST = { 11 | 'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==', 12 | 'md5': '4466372687352851af2d131cfaa8a4c7', 13 | 'info_dict': { 14 | 'id': 'UZF8zlmuQbe4mr+7dCiQ0w==', 15 | 'ext': 'mp4', 16 | 'title': "Damon's Glide message", 17 | 'thumbnail': r're:^https?://.*?\.cloudfront\.net/.*\.jpg$', 18 | } 19 | } 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | 24 | webpage = self._download_webpage(url, video_id) 25 | 
26 | title = self._html_search_regex( 27 | r'(.+?)', webpage, 28 | 'title', default=None) or self._og_search_title(webpage) 29 | video_url = self._proto_relative_url(self._search_regex( 30 | r']+src=(["\'])(?P.+?)\1', 31 | webpage, 'video URL', default=None, 32 | group='url')) or self._og_search_video_url(webpage) 33 | thumbnail = self._proto_relative_url(self._search_regex( 34 | r']+id=["\']video-thumbnail["\'][^>]+src=(["\'])(?P.+?)\1', 35 | webpage, 'thumbnail url', default=None, 36 | group='url')) or self._og_search_thumbnail(webpage) 37 | 38 | return { 39 | 'id': video_id, 40 | 'title': title, 41 | 'url': video_url, 42 | 'thumbnail': thumbnail, 43 | } 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/goshgay.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..compat import ( 6 | compat_parse_qs, 7 | ) 8 | from ..utils import ( 9 | parse_duration, 10 | ) 11 | 12 | 13 | class GoshgayIE(InfoExtractor): 14 | _VALID_URL = r'https?://(?:www\.)?goshgay\.com/video(?P\d+?)($|/)' 15 | _TEST = { 16 | 'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video', 17 | 'md5': '4b6db9a0a333142eb9f15913142b0ed1', 18 | 'info_dict': { 19 | 'id': '299069', 20 | 'ext': 'flv', 21 | 'title': 'DIESEL SFW XXX Video', 22 | 'thumbnail': r're:^http://.*\.jpg$', 23 | 'duration': 80, 24 | 'age_limit': 18, 25 | } 26 | } 27 | 28 | def _real_extract(self, url): 29 | video_id = self._match_id(url) 30 | webpage = self._download_webpage(url, video_id) 31 | 32 | title = self._html_search_regex( 33 | r'
<h2>
    (.*?)<', webpage, 'title') 34 | duration = parse_duration(self._html_search_regex( 35 | r'\s*-?\s*(.*?)', 36 | webpage, 'duration', fatal=False)) 37 | 38 | flashvars = compat_parse_qs(self._html_search_regex( 39 | r'\d+)\.html' 9 | _TEST = { 10 | 'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html', 11 | 'md5': 'a8862a00a0fd65b8b43acc5b8e33f798', 12 | 'info_dict': { 13 | 'id': '5156', 14 | 'ext': 'mp4', 15 | 'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis', 16 | 'duration': 1219, 17 | } 18 | } 19 | 20 | def _real_extract(self, url): 21 | video_id = self._match_id(url) 22 | webpage = self._download_webpage(url, video_id) 23 | 24 | root_path = self._search_regex( 25 | r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path', 26 | default='http://evt.dispeak.com/nvidia/events/gtc15/') 27 | xml_file_id = self._search_regex( 28 | r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 'xml file id') 29 | 30 | return { 31 | '_type': 'url_transparent', 32 | 'id': video_id, 33 | 'url': '%sxml/%s.xml' % (root_path, xml_file_id), 34 | 'ie_key': 'DigitallySpeaking', 35 | } 36 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/helsinki.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from __future__ import unicode_literals 4 | 5 | from .common import InfoExtractor 6 | from ..utils import js_to_json 7 | 8 | 9 | class HelsinkiIE(InfoExtractor): 10 | IE_DESC = 'helsinki.fi' 11 | _VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P\d+)' 12 | _TEST = { 13 | 'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258', 14 | 'info_dict': { 15 | 'id': '20258', 16 | 'ext': 'mp4', 17 | 'title': 'Tietotekniikkafoorumi-iltapäivä', 18 | 'description': 'md5:f5c904224d43c133225130fe156a5ee0', 19 | }, 20 | 'params': { 21 | 'skip_download': True, # RTMP 22 | } 23 | } 24 | 25 | def _real_extract(self, url): 26 | video_id = self._match_id(url) 27 | webpage = self._download_webpage(url, video_id) 28 | 29 | params = self._parse_json(self._html_search_regex( 30 | r'(?s)jwplayer\("player"\).setup\((\{.*?\})\);', 31 | webpage, 'player code'), video_id, transform_source=js_to_json) 32 | formats = [{ 33 | 'url': s['file'], 34 | 'ext': 'mp4', 35 | } for s in params['sources']] 36 | self._sort_formats(formats) 37 | 38 | return { 39 | 'id': video_id, 40 | 'title': self._og_search_title(webpage).replace('Video: ', ''), 41 | 'description': self._og_search_description(webpage), 42 | 'formats': formats, 43 | } 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/hentaistigma.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class HentaiStigmaIE(InfoExtractor): 7 | _VALID_URL = r'^https?://hentai\.animestigma\.com/(?P[^/]+)' 8 | _TEST = { 9 | 'url': 'http://hentai.animestigma.com/inyouchuu-etsu-bonus/', 10 | 'md5': '4e3d07422a68a4cc363d8f57c8bf0d23', 11 | 'info_dict': { 12 | 'id': 'inyouchuu-etsu-bonus', 13 | 'ext': 'mp4', 14 | 'title': 'Inyouchuu Etsu Bonus', 15 | 'age_limit': 18, 16 | } 17 | } 18 | 19 | def _real_extract(self, url): 20 | video_id = self._match_id(url) 21 | 22 | webpage = self._download_webpage(url, video_id) 23 | 24 | title = self._html_search_regex( 25 | r']+class="posttitle"[^>]*>]*>([^<]+)', 26 | webpage, 'title') 27 | wrap_url = 
self._html_search_regex( 28 | r']+src="([^"]+mp4)"', webpage, 'wrapper url') 29 | wrap_webpage = self._download_webpage(wrap_url, video_id) 30 | 31 | video_url = self._html_search_regex( 32 | r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url') 33 | 34 | return { 35 | 'id': video_id, 36 | 'url': video_url, 37 | 'title': title, 38 | 'age_limit': 18, 39 | } 40 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/hgtv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class HGTVComShowIE(InfoExtractor): 8 | IE_NAME = 'hgtv.com:show' 9 | _VALID_URL = r'https?://(?:www\.)?hgtv\.com/shows/[^/]+/(?P[^/?#&]+)' 10 | _TESTS = [{ 11 | # data-module="video" 12 | 'url': 'http://www.hgtv.com/shows/flip-or-flop/flip-or-flop-full-episodes-season-4-videos', 13 | 'info_dict': { 14 | 'id': 'flip-or-flop-full-episodes-season-4-videos', 15 | 'title': 'Flip or Flop Full Episodes', 16 | }, 17 | 'playlist_mincount': 15, 18 | }, { 19 | # data-deferred-module="video" 20 | 'url': 'http://www.hgtv.com/shows/good-bones/episodes/an-old-victorian-house-gets-a-new-facelift', 21 | 'only_matching': True, 22 | }] 23 | 24 | def _real_extract(self, url): 25 | display_id = self._match_id(url) 26 | 27 | webpage = self._download_webpage(url, display_id) 28 | 29 | config = self._parse_json( 30 | self._search_regex( 31 | r'(?s)data-(?:deferred-)?module=["\']video["\'][^>]*>.*?]+type=["\']text/x-config["\'][^>]*>(.+?)\d+)' 9 | _TEST = { 10 | 'url': 'http://www.historicfilms.com/tapes/4728', 11 | 'md5': 'd4a437aec45d8d796a38a215db064e9a', 12 | 'info_dict': { 13 | 'id': '4728', 14 | 'ext': 'mov', 15 | 'title': 'Historic Films: GP-7', 16 | 'description': 'md5:1a86a0f3ac54024e419aba97210d959a', 17 | 'thumbnail': r're:^https?://.*\.jpg$', 18 | 'duration': 2096, 19 | }, 20 | } 21 | 22 | def _real_extract(self, url): 23 | video_id = self._match_id(url) 24 | 25 | webpage = self._download_webpage(url, video_id) 26 | 27 | tape_id = self._search_regex( 28 | [r'class="tapeId"[^>]*>([^<]+)<', r'tapeId\s*:\s*"([^"]+)"'], 29 | webpage, 'tape id') 30 | 31 | title = self._og_search_title(webpage) 32 | description = self._og_search_description(webpage) 33 | thumbnail = self._html_search_meta( 34 | 'thumbnailUrl', webpage, 'thumbnails') or self._og_search_thumbnail(webpage) 35 | duration = parse_duration(self._html_search_meta( 36 | 'duration', webpage, 'duration')) 37 | 38 | video_url = 'http://www.historicfilms.com/video/%s_%s_web.mov' % (tape_id, video_id) 39 | 40 | return { 41 | 'id': video_id, 42 | 'url': video_url, 43 | 'title': title, 44 | 'description': description, 45 | 'thumbnail': thumbnail, 46 | 'duration': duration, 47 | } 48 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/hornbunny.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import ( 6 | int_or_none, 7 | parse_duration, 8 | ) 9 | 10 | 11 | class HornBunnyIE(InfoExtractor): 12 | _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P[a-z-]+)-(?P\d+)\.html' 13 | _TEST = { 14 | 'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html', 15 | 'md5': 'e20fd862d1894b67564c96f180f43924', 16 | 'info_dict': { 17 | 'id': '5227', 18 | 'ext': 'mp4', 19 | 
'title': 'panty slut jerk off instruction', 20 | 'duration': 550, 21 | 'age_limit': 18, 22 | 'view_count': int, 23 | 'thumbnail': r're:^https?://.*\.jpg$', 24 | } 25 | } 26 | 27 | def _real_extract(self, url): 28 | video_id = self._match_id(url) 29 | 30 | webpage = self._download_webpage(url, video_id) 31 | title = self._og_search_title(webpage) 32 | info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0] 33 | 34 | duration = parse_duration(self._search_regex( 35 | r'Runtime:\s*([0-9:]+)', 36 | webpage, 'duration', fatal=False)) 37 | view_count = int_or_none(self._search_regex( 38 | r'Views:\s*(\d+)', 39 | webpage, 'view count', fatal=False)) 40 | 41 | info_dict.update({ 42 | 'id': video_id, 43 | 'title': title, 44 | 'duration': duration, 45 | 'view_count': view_count, 46 | 'age_limit': 18, 47 | }) 48 | 49 | return info_dict 50 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/howcast.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..utils import parse_iso8601 5 | 6 | 7 | class HowcastIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P\d+)' 9 | _TEST = { 10 | 'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly', 11 | 'md5': '7d45932269a288149483144f01b99789', 12 | 'info_dict': { 13 | 'id': '390161', 14 | 'ext': 'mp4', 15 | 'title': 'How to Tie a Square Knot Properly', 16 | 'description': 'md5:dbe792e5f6f1489027027bf2eba188a3', 17 | 'timestamp': 1276081287, 18 | 'upload_date': '20100609', 19 | 'duration': 56.823, 20 | }, 21 | 'params': { 22 | 'skip_download': True, 23 | }, 24 | 'add_ie': ['Ooyala'], 25 | } 26 | 27 | def _real_extract(self, url): 28 | video_id = self._match_id(url) 29 | 30 | webpage = self._download_webpage(url, video_id) 31 | 32 | embed_code = self._search_regex( 33 | r']+src="[^"]+\bembed_code=([^\b]+)\b', 34 | webpage, 'ooyala embed code') 35 | 36 | return { 37 | '_type': 'url_transparent', 38 | 'ie_key': 'Ooyala', 39 | 'url': 'ooyala:%s' % embed_code, 40 | 'id': video_id, 41 | 'timestamp': parse_iso8601(self._html_search_meta( 42 | 'article:published_time', webpage, 'timestamp')), 43 | } 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/hypem.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..utils import int_or_none 5 | 6 | 7 | class HypemIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?hypem\.com/track/(?P[0-9a-z]{5})' 9 | _TEST = { 10 | 'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME', 11 | 'md5': 'b9cc91b5af8995e9f0c1cee04c575828', 12 | 'info_dict': { 13 | 'id': '1v6ga', 14 | 'ext': 'mp3', 15 | 'title': 'Tame', 16 | 'uploader': 'BODYWORK', 17 | 'timestamp': 1371810457, 18 | 'upload_date': '20130621', 19 | } 20 | } 21 | 22 | def _real_extract(self, url): 23 | track_id = self._match_id(url) 24 | 25 | response = self._download_webpage(url, track_id) 26 | 27 | track = self._parse_json(self._html_search_regex( 28 | r'(?s)(.+?)', 29 | response, 'tracks'), track_id)['tracks'][0] 30 | 31 | track_id = track['id'] 32 | title = track['song'] 33 | 34 | final_url = self._download_json( 35 | 'http://hypem.com/serve/source/%s/%s' % (track_id, track['key']), 36 | track_id, 'Downloading metadata', headers={ 37 | 'Content-Type': 
'application/json' 38 | })['url'] 39 | 40 | return { 41 | 'id': track_id, 42 | 'url': final_url, 43 | 'ext': 'mp3', 44 | 'title': title, 45 | 'uploader': track.get('artist'), 46 | 'duration': int_or_none(track.get('time')), 47 | 'timestamp': int_or_none(track.get('ts')), 48 | 'track': title, 49 | } 50 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/ku6.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class Ku6IE(InfoExtractor): 7 | _VALID_URL = r'https?://v\.ku6\.com/show/(?P[a-zA-Z0-9\-\_]+)(?:\.)*html' 8 | _TEST = { 9 | 'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html', 10 | 'md5': '01203549b9efbb45f4b87d55bdea1ed1', 11 | 'info_dict': { 12 | 'id': 'JG-8yS14xzBr4bCn1pu0xw', 13 | 'ext': 'f4v', 14 | 'title': 'techniques test', 15 | } 16 | } 17 | 18 | def _real_extract(self, url): 19 | video_id = self._match_id(url) 20 | webpage = self._download_webpage(url, video_id) 21 | 22 | title = self._html_search_regex( 23 | r'

<h1 title=.*>(.*?)</h1>

    ', webpage, 'title') 24 | dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id 25 | jsonData = self._download_json(dataUrl, video_id) 26 | downloadUrl = jsonData['data']['f'] 27 | 28 | return { 29 | 'id': video_id, 30 | 'title': title, 31 | 'url': downloadUrl 32 | } 33 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/lci.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class LCIIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?lci\.fr/[^/]+/[\w-]+-(?P\d+)\.html' 9 | _TEST = { 10 | 'url': 'http://www.lci.fr/international/etats-unis-a-j-62-hillary-clinton-reste-sans-voix-2001679.html', 11 | 'md5': '2fdb2538b884d4d695f9bd2bde137e6c', 12 | 'info_dict': { 13 | 'id': '13244802', 14 | 'ext': 'mp4', 15 | 'title': 'Hillary Clinton et sa quinte de toux, en plein meeting', 16 | 'description': 'md5:a4363e3a960860132f8124b62f4a01c9', 17 | } 18 | } 19 | 20 | def _real_extract(self, url): 21 | video_id = self._match_id(url) 22 | webpage = self._download_webpage(url, video_id) 23 | wat_id = self._search_regex( 24 | (r'data-watid=[\'"](\d+)', r'idwat["\']?\s*:\s*["\']?(\d+)'), 25 | webpage, 'wat id') 26 | return self.url_result('wat:' + wat_id, 'Wat', wat_id) 27 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/lenta.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class LentaIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?lenta\.ru/[^/]+/\d+/\d+/\d+/(?P[^/?#&]+)' 9 | _TESTS = [{ 10 | 'url': 'https://lenta.ru/news/2018/03/22/savshenko_go/', 11 | 'info_dict': { 12 | 'id': '964400', 13 | 'ext': 'mp4', 14 | 'title': 'Надежду Савченко задержали', 15 | 'thumbnail': r're:^https?://.*\.jpg$', 16 | 'duration': 61, 17 | 'view_count': int, 18 | }, 19 | 'params': { 20 | 'skip_download': True, 21 | }, 22 | }, { 23 | # EaglePlatform iframe embed 24 | 'url': 'http://lenta.ru/news/2015/03/06/navalny/', 25 | 'info_dict': { 26 | 'id': '227304', 27 | 'ext': 'mp4', 28 | 'title': 'Навальный вышел на свободу', 29 | 'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5', 30 | 'thumbnail': r're:^https?://.*\.jpg$', 31 | 'duration': 87, 32 | 'view_count': int, 33 | 'age_limit': 0, 34 | }, 35 | 'params': { 36 | 'skip_download': True, 37 | }, 38 | }] 39 | 40 | def _real_extract(self, url): 41 | display_id = self._match_id(url) 42 | 43 | webpage = self._download_webpage(url, display_id) 44 | 45 | video_id = self._search_regex( 46 | r'vid\s*:\s*["\']?(\d+)', webpage, 'eagleplatform id', 47 | default=None) 48 | if video_id: 49 | return self.url_result( 50 | 'eagleplatform:lentaru.media.eagleplatform.com:%s' % video_id, 51 | ie='EaglePlatform', video_id=video_id) 52 | 53 | return self.url_result(url, ie='Generic') 54 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/livejournal.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..compat import compat_str 6 | from ..utils import int_or_none 7 | 8 | 9 | class LiveJournalIE(InfoExtractor): 10 | _VALID_URL = 
r'https?://(?:[^.]+\.)?livejournal\.com/video/album/\d+.+?\bid=(?P\d+)' 11 | _TEST = { 12 | 'url': 'https://andrei-bt.livejournal.com/video/album/407/?mode=view&id=51272', 13 | 'md5': 'adaf018388572ced8a6f301ace49d4b2', 14 | 'info_dict': { 15 | 'id': '1263729', 16 | 'ext': 'mp4', 17 | 'title': 'Истребители против БПЛА', 18 | 'upload_date': '20190624', 19 | 'timestamp': 1561406715, 20 | } 21 | } 22 | 23 | def _real_extract(self, url): 24 | video_id = self._match_id(url) 25 | webpage = self._download_webpage(url, video_id) 26 | record = self._parse_json(self._search_regex( 27 | r'Site\.page\s*=\s*({.+?});', webpage, 28 | 'page data'), video_id)['video']['record'] 29 | storage_id = compat_str(record['storageid']) 30 | title = record.get('name') 31 | if title: 32 | # remove filename extension(.mp4, .mov, etc...) 33 | title = title.rsplit('.', 1)[0] 34 | return { 35 | '_type': 'url_transparent', 36 | 'id': video_id, 37 | 'title': title, 38 | 'thumbnail': record.get('thumbnail'), 39 | 'timestamp': int_or_none(record.get('timecreate')), 40 | 'url': 'eagleplatform:vc.videos.livejournal.com:' + storage_id, 41 | 'ie_key': 'EaglePlatform', 42 | } 43 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/lovehomeporn.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import re 4 | 5 | from .nuevo import NuevoBaseIE 6 | 7 | 8 | class LoveHomePornIE(NuevoBaseIE): 9 | _VALID_URL = r'https?://(?:www\.)?lovehomeporn\.com/video/(?P\d+)(?:/(?P[^/?#&]+))?' 10 | _TEST = { 11 | 'url': 'http://lovehomeporn.com/video/48483/stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick#menu', 12 | 'info_dict': { 13 | 'id': '48483', 14 | 'display_id': 'stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick', 15 | 'ext': 'mp4', 16 | 'title': 'Stunning busty brunette girlfriend sucking and riding a big dick', 17 | 'age_limit': 18, 18 | 'duration': 238.47, 19 | }, 20 | 'params': { 21 | 'skip_download': True, 22 | } 23 | } 24 | 25 | def _real_extract(self, url): 26 | mobj = re.match(self._VALID_URL, url) 27 | video_id = mobj.group('id') 28 | display_id = mobj.group('display_id') 29 | 30 | info = self._extract_nuevo( 31 | 'http://lovehomeporn.com/media/nuevo/config.php?key=%s' % video_id, 32 | video_id) 33 | info.update({ 34 | 'display_id': display_id, 35 | 'age_limit': 18 36 | }) 37 | return info 38 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/m6.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class M6IE(InfoExtractor): 8 | IE_NAME = 'm6' 9 | _VALID_URL = r'https?://(?:www\.)?m6\.fr/[^/]+/videos/(?P\d+)-[^\.]+\.html' 10 | 11 | _TEST = { 12 | 'url': 'http://www.m6.fr/emission-les_reines_du_shopping/videos/11323908-emeline_est_la_reine_du_shopping_sur_le_theme_ma_fete_d_8217_anniversaire.html', 13 | 'md5': '242994a87de2c316891428e0176bcb77', 14 | 'info_dict': { 15 | 'id': '11323908', 16 | 'ext': 'mp4', 17 | 'title': 'Emeline est la Reine du Shopping sur le thème « Ma fête d’anniversaire ! 
»', 18 | 'description': 'md5:1212ae8fb4b7baa4dc3886c5676007c2', 19 | 'duration': 100, 20 | } 21 | } 22 | 23 | def _real_extract(self, url): 24 | video_id = self._match_id(url) 25 | return self.url_result('6play:%s' % video_id, 'SixPlay', video_id) 26 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/miaopai.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class MiaoPaiIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?miaopai\.com/show/(?P[-A-Za-z0-9~_]+)' 9 | _TEST = { 10 | 'url': 'http://www.miaopai.com/show/n~0hO7sfV1nBEw4Y29-Hqg__.htm', 11 | 'md5': '095ed3f1cd96b821add957bdc29f845b', 12 | 'info_dict': { 13 | 'id': 'n~0hO7sfV1nBEw4Y29-Hqg__', 14 | 'ext': 'mp4', 15 | 'title': '西游记音乐会的秒拍视频', 16 | 'thumbnail': 're:^https?://.*/n~0hO7sfV1nBEw4Y29-Hqg___m.jpg', 17 | } 18 | } 19 | 20 | _USER_AGENT_IPAD = 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1' 21 | 22 | def _real_extract(self, url): 23 | video_id = self._match_id(url) 24 | webpage = self._download_webpage( 25 | url, video_id, headers={'User-Agent': self._USER_AGENT_IPAD}) 26 | 27 | title = self._html_search_regex( 28 | r'([^<]+)', webpage, 'title') 29 | thumbnail = self._html_search_regex( 30 | r']+class=(?P[\'"]).*\bvideo_img\b.*(?P=q1)[^>]+data-url=(?P[\'"])(?P[^\'"]+)(?P=q2)', 31 | webpage, 'thumbnail', fatal=False, group='url') 32 | videos = self._parse_html5_media_entries(url, webpage, video_id) 33 | info = videos[0] 34 | 35 | info.update({ 36 | 'id': video_id, 37 | 'title': title, 38 | 'thumbnail': thumbnail, 39 | }) 40 | return info 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/moviezine.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .common import InfoExtractor 7 | 8 | 9 | class MoviezineIE(InfoExtractor): 10 | _VALID_URL = r'https?://(?:www\.)?moviezine\.se/video/(?P[^?#]+)' 11 | 12 | _TEST = { 13 | 'url': 'http://www.moviezine.se/video/205866', 14 | 'info_dict': { 15 | 'id': '205866', 16 | 'ext': 'mp4', 17 | 'title': 'Oculus - Trailer 1', 18 | 'description': 'md5:40cc6790fc81d931850ca9249b40e8a4', 19 | 'thumbnail': r're:http://.*\.jpg', 20 | }, 21 | } 22 | 23 | def _real_extract(self, url): 24 | mobj = re.match(self._VALID_URL, url) 25 | video_id = mobj.group('id') 26 | 27 | webpage = self._download_webpage(url, video_id) 28 | jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player') 29 | 30 | formats = [{ 31 | 'format_id': 'sd', 32 | 'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'), 33 | 'quality': 0, 34 | 'ext': 'mp4', 35 | }] 36 | 37 | self._sort_formats(formats) 38 | 39 | return { 40 | 'id': video_id, 41 | 'title': self._search_regex(r'title: "(.+?)",', jsplayer, 'title'), 42 | 'thumbnail': self._search_regex(r'image: "(.+?)",', jsplayer, 'image'), 43 | 'formats': formats, 44 | 'description': self._og_search_description(webpage), 45 | } 46 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/mychannels.py: -------------------------------------------------------------------------------- 1 | # 
coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .common import InfoExtractor 7 | 8 | 9 | class MyChannelsIE(InfoExtractor): 10 | _VALID_URL = r'https?://(?:www\.)?mychannels\.com/.*(?Pvideo|production)_id=(?P[0-9]+)' 11 | _TEST = { 12 | 'url': 'https://mychannels.com/missholland/miss-holland?production_id=3416', 13 | 'md5': 'b8993daad4262dd68d89d651c0c52c45', 14 | 'info_dict': { 15 | 'id': 'wUUDZZep6vQD', 16 | 'ext': 'mp4', 17 | 'title': 'Miss Holland joins VOTE LEAVE', 18 | 'description': 'Miss Holland | #13 Not a potato', 19 | 'uploader': 'Miss Holland', 20 | } 21 | } 22 | 23 | def _real_extract(self, url): 24 | id_type, url_id = re.match(self._VALID_URL, url).groups() 25 | webpage = self._download_webpage(url, url_id) 26 | video_data = self._html_search_regex(r']+data-%s-id="%s"[^>]+)>' % (id_type, url_id), webpage, 'video data') 27 | 28 | def extract_data_val(attr, fatal=False): 29 | return self._html_search_regex(r'data-%s\s*=\s*"([^"]+)"' % attr, video_data, attr, fatal=fatal) 30 | minoto_id = extract_data_val('minoto-id') or self._search_regex(r'/id/([a-zA-Z0-9]+)', extract_data_val('video-src', True), 'minoto id') 31 | 32 | return { 33 | '_type': 'url_transparent', 34 | 'url': 'minoto:%s' % minoto_id, 35 | 'id': url_id, 36 | 'title': extract_data_val('title', True), 37 | 'description': extract_data_val('description'), 38 | 'thumbnail': extract_data_val('image'), 39 | 'uploader': extract_data_val('channel'), 40 | } 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/myvidster.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class MyVidsterIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?myvidster\.com/video/(?P\d+)/' 8 | 9 | _TEST = { 10 | 'url': 'http://www.myvidster.com/video/32059805/Hot_chemistry_with_raw_love_making', 11 | 'md5': '95296d0231c1363222c3441af62dc4ca', 12 | 'info_dict': { 13 | 'id': '3685814', 14 | 'title': 'md5:7d8427d6d02c4fbcef50fe269980c749', 15 | 'upload_date': '20141027', 16 | 'uploader': 'utkualp', 17 | 'ext': 'mp4', 18 | 'age_limit': 18, 19 | }, 20 | 'add_ie': ['XHamster'], 21 | } 22 | 23 | def _real_extract(self, url): 24 | video_id = self._match_id(url) 25 | webpage = self._download_webpage(url, video_id) 26 | 27 | return self.url_result(self._html_search_regex( 28 | r'rel="videolink" href="(?P.*)">', 29 | webpage, 'real video url')) 30 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/nerdcubed.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import datetime 5 | 6 | from .common import InfoExtractor 7 | 8 | 9 | class NerdCubedFeedIE(InfoExtractor): 10 | _VALID_URL = r'https?://(?:www\.)?nerdcubed\.co\.uk/feed\.json' 11 | _TEST = { 12 | 'url': 'http://www.nerdcubed.co.uk/feed.json', 13 | 'info_dict': { 14 | 'id': 'nerdcubed-feed', 15 | 'title': 'nerdcubed.co.uk feed', 16 | }, 17 | 'playlist_mincount': 1300, 18 | } 19 | 20 | def _real_extract(self, url): 21 | feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed') 22 | 23 | entries = [{ 24 | '_type': 'url', 25 | 'title': feed_entry['title'], 26 | 'uploader': feed_entry['source']['name'] if feed_entry['source'] else None, 27 | 'upload_date': datetime.datetime.strptime(feed_entry['date'], 
'%Y-%m-%d').strftime('%Y%m%d'), 28 | 'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'], 29 | } for feed_entry in feed] 30 | 31 | return { 32 | '_type': 'playlist', 33 | 'title': 'nerdcubed.co.uk feed', 34 | 'id': 'nerdcubed-feed', 35 | 'entries': entries, 36 | } 37 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/nonktube.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .nuevo import NuevoBaseIE 4 | 5 | 6 | class NonkTubeIE(NuevoBaseIE): 7 | _VALID_URL = r'https?://(?:www\.)?nonktube\.com/(?:(?:video|embed)/|media/nuevo/embed\.php\?.*?\bid=)(?P\d+)' 8 | _TESTS = [{ 9 | 'url': 'https://www.nonktube.com/video/118636/sensual-wife-uncensored-fucked-in-hairy-pussy-and-facialized', 10 | 'info_dict': { 11 | 'id': '118636', 12 | 'ext': 'mp4', 13 | 'title': 'Sensual Wife Uncensored Fucked In Hairy Pussy And Facialized', 14 | 'age_limit': 18, 15 | 'duration': 1150.98, 16 | }, 17 | 'params': { 18 | 'skip_download': True, 19 | } 20 | }, { 21 | 'url': 'https://www.nonktube.com/embed/118636', 22 | 'only_matching': True, 23 | }] 24 | 25 | def _real_extract(self, url): 26 | video_id = self._match_id(url) 27 | 28 | webpage = self._download_webpage(url, video_id) 29 | 30 | title = self._og_search_title(webpage) 31 | info = self._parse_html5_media_entries(url, webpage, video_id)[0] 32 | 33 | info.update({ 34 | 'id': video_id, 35 | 'title': title, 36 | 'age_limit': 18, 37 | }) 38 | return info 39 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/nrl.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class NRLTVIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?nrl\.com/tv(/[^/]+)*/(?P[^/?&#]+)' 9 | _TEST = { 10 | 'url': 'https://www.nrl.com/tv/news/match-highlights-titans-v-knights-862805/', 11 | 'info_dict': { 12 | 'id': 'YyNnFuaDE6kPJqlDhG4CGQ_w89mKTau4', 13 | 'ext': 'mp4', 14 | 'title': 'Match Highlights: Titans v Knights', 15 | }, 16 | 'params': { 17 | # m3u8 download 18 | 'skip_download': True, 19 | 'format': 'bestvideo', 20 | }, 21 | } 22 | 23 | def _real_extract(self, url): 24 | display_id = self._match_id(url) 25 | webpage = self._download_webpage(url, display_id) 26 | q_data = self._parse_json(self._html_search_regex( 27 | r'(?s)q-data="({.+?})"', webpage, 'player data'), display_id) 28 | ooyala_id = q_data['videoId'] 29 | return self.url_result( 30 | 'ooyala:' + ooyala_id, 'Ooyala', ooyala_id, q_data.get('title')) 31 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/nuevo.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | from ..utils import ( 7 | float_or_none, 8 | xpath_text 9 | ) 10 | 11 | 12 | class NuevoBaseIE(InfoExtractor): 13 | def _extract_nuevo(self, config_url, video_id, headers={}): 14 | config = self._download_xml( 15 | config_url, video_id, transform_source=lambda s: s.strip(), 16 | headers=headers) 17 | 18 | title = xpath_text(config, './title', 'title', fatal=True).strip() 19 | video_id = xpath_text(config, './mediaid', default=video_id) 20 | thumbnail = xpath_text(config, ['./image', './thumb']) 21 | 
duration = float_or_none(xpath_text(config, './duration')) 22 | 23 | formats = [] 24 | for element_name, format_id in (('file', 'sd'), ('filehd', 'hd')): 25 | video_url = xpath_text(config, element_name) 26 | if video_url: 27 | formats.append({ 28 | 'url': video_url, 29 | 'format_id': format_id, 30 | }) 31 | self._check_formats(formats, video_id) 32 | 33 | return { 34 | 'id': video_id, 35 | 'title': title, 36 | 'thumbnail': thumbnail, 37 | 'duration': duration, 38 | 'formats': formats 39 | } 40 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/nzz.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | import re 5 | 6 | from .common import InfoExtractor 7 | from ..utils import ( 8 | extract_attributes, 9 | ) 10 | 11 | 12 | class NZZIE(InfoExtractor): 13 | _VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P\d+)' 14 | _TESTS = [{ 15 | 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153', 16 | 'info_dict': { 17 | 'id': '9153', 18 | }, 19 | 'playlist_mincount': 6, 20 | }, { 21 | 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112', 22 | 'info_dict': { 23 | 'id': '1368112', 24 | }, 25 | 'playlist_count': 1, 26 | }] 27 | 28 | def _real_extract(self, url): 29 | page_id = self._match_id(url) 30 | webpage = self._download_webpage(url, page_id) 31 | 32 | entries = [] 33 | for player_element in re.findall( 34 | r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage): 35 | player_params = extract_attributes(player_element) 36 | if player_params.get('data-type') not in ('kaltura_singleArticle',): 37 | self.report_warning('Unsupported player type') 38 | continue 39 | entry_id = player_params['data-id'] 40 | entries.append(self.url_result( 41 | 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id)) 42 | 43 | return self.playlist_result(entries, page_id) 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/odatv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import ( 6 | ExtractorError, 7 | NO_DEFAULT, 8 | remove_start 9 | ) 10 | 11 | 12 | class OdaTVIE(InfoExtractor): 13 | _VALID_URL = r'https?://(?:www\.)?odatv\.com/(?:mob|vid)_video\.php\?.*\bid=(?P[^&]+)' 14 | _TESTS = [{ 15 | 'url': 'http://odatv.com/vid_video.php?id=8E388', 16 | 'md5': 'dc61d052f205c9bf2da3545691485154', 17 | 'info_dict': { 18 | 'id': '8E388', 19 | 'ext': 'mp4', 20 | 'title': 'Artık Davutoğlu ile devam edemeyiz' 21 | } 22 | }, { 23 | # mobile URL 24 | 'url': 'http://odatv.com/mob_video.php?id=8E388', 25 | 'only_matching': True, 26 | }, { 27 | # no video 28 | 'url': 'http://odatv.com/mob_video.php?id=8E900', 29 | 'only_matching': True, 30 | }] 31 | 32 | def _real_extract(self, url): 33 | video_id = self._match_id(url) 34 | webpage = self._download_webpage(url, video_id) 35 | 36 | no_video = 'NO VIDEO!' 
in webpage 37 | 38 | video_url = self._search_regex( 39 | r'mp4\s*:\s*(["\'])(?Phttp.+?)\1', webpage, 'video url', 40 | default=None if no_video else NO_DEFAULT, group='url') 41 | 42 | if no_video: 43 | raise ExtractorError('Video %s does not exist' % video_id, expected=True) 44 | 45 | return { 46 | 'id': video_id, 47 | 'url': video_url, 48 | 'title': remove_start(self._og_search_title(webpage), 'Video: '), 49 | 'thumbnail': self._og_search_thumbnail(webpage), 50 | } 51 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/oktoberfesttv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class OktoberfestTVIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P[^/?#]+)' 9 | 10 | _TEST = { 11 | 'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt', 12 | 'info_dict': { 13 | 'id': 'hb-zelt', 14 | 'ext': 'mp4', 15 | 'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 16 | 'thumbnail': r're:^https?://.*\.jpg$', 17 | 'is_live': True, 18 | }, 19 | 'params': { 20 | 'skip_download': True, 21 | } 22 | } 23 | 24 | def _real_extract(self, url): 25 | video_id = self._match_id(url) 26 | webpage = self._download_webpage(url, video_id) 27 | 28 | title = self._live_title(self._html_search_regex( 29 | r'

<h1><strong>.*?</strong>(.*?)</h1>

    ', webpage, 'title')) 30 | 31 | clip = self._search_regex( 32 | r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip') 33 | ncurl = self._search_regex( 34 | r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base') 35 | video_url = ncurl + clip 36 | thumbnail = self._search_regex( 37 | r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage, 38 | 'thumbnail', fatal=False) 39 | 40 | return { 41 | 'id': video_id, 42 | 'title': title, 43 | 'url': video_url, 44 | 'ext': 'mp4', 45 | 'is_live': True, 46 | 'thumbnail': thumbnail, 47 | } 48 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/outsidetv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class OutsideTVIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?outsidetv\.com/(?:[^/]+/)*?play/[a-zA-Z0-9]{8}/\d+/\d+/(?P[a-zA-Z0-9]{8})' 9 | _TESTS = [{ 10 | 'url': 'http://www.outsidetv.com/category/snow/play/ZjQYboH6/1/10/Hdg0jukV/4', 11 | 'md5': '192d968fedc10b2f70ec31865ffba0da', 12 | 'info_dict': { 13 | 'id': 'Hdg0jukV', 14 | 'ext': 'mp4', 15 | 'title': 'Home - Jackson Ep 1 | Arbor Snowboards', 16 | 'description': 'md5:41a12e94f3db3ca253b04bb1e8d8f4cd', 17 | 'upload_date': '20181225', 18 | 'timestamp': 1545742800, 19 | } 20 | }, { 21 | 'url': 'http://www.outsidetv.com/home/play/ZjQYboH6/1/10/Hdg0jukV/4', 22 | 'only_matching': True, 23 | }] 24 | 25 | def _real_extract(self, url): 26 | jw_media_id = self._match_id(url) 27 | return self.url_result( 28 | 'jwplatform:' + jw_media_id, 'JWPlatform', jw_media_id) 29 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/parliamentliveuk.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class ParliamentLiveUKIE(InfoExtractor): 7 | IE_NAME = 'parliamentlive.tv' 8 | IE_DESC = 'UK parliament videos' 9 | _VALID_URL = r'(?i)https?://(?:www\.)?parliamentlive\.tv/Event/Index/(?P[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' 10 | 11 | _TESTS = [{ 12 | 'url': 'http://parliamentlive.tv/Event/Index/c1e9d44d-fd6c-4263-b50f-97ed26cc998b', 13 | 'info_dict': { 14 | 'id': '1_af9nv9ym', 15 | 'ext': 'mp4', 16 | 'title': 'Home Affairs Committee', 17 | 'uploader_id': 'FFMPEG-01', 18 | 'timestamp': 1422696664, 19 | 'upload_date': '20150131', 20 | }, 21 | }, { 22 | 'url': 'http://parliamentlive.tv/event/index/3f24936f-130f-40bf-9a5d-b3d6479da6a4', 23 | 'only_matching': True, 24 | }] 25 | 26 | def _real_extract(self, url): 27 | video_id = self._match_id(url) 28 | webpage = self._download_webpage( 29 | 'http://vodplayer.parliamentlive.tv/?mid=' + video_id, video_id) 30 | widget_config = self._parse_json(self._search_regex( 31 | r'(?s)kWidgetConfig\s*=\s*({.+});', 32 | webpage, 'kaltura widget config'), video_id) 33 | kaltura_url = 'kaltura:%s:%s' % ( 34 | widget_config['wid'][1:], widget_config['entry_id']) 35 | event_title = self._download_json( 36 | 'http://parliamentlive.tv/Event/GetShareVideo/' + video_id, video_id)['event']['title'] 37 | return { 38 | '_type': 'url_transparent', 39 | 'title': event_title, 40 | 'description': '', 41 | 'url': kaltura_url, 42 | 'ie_key': 'Kaltura', 43 | } 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/people.py: 
-------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class PeopleIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?people\.com/people/videos/0,,(?P\d+),00\.html' 9 | 10 | _TEST = { 11 | 'url': 'http://www.people.com/people/videos/0,,20995451,00.html', 12 | 'info_dict': { 13 | 'id': 'ref:20995451', 14 | 'ext': 'mp4', 15 | 'title': 'Astronaut Love Triangle Victim Speaks Out: “The Crime in 2007 Hasn’t Defined Us”', 16 | 'description': 'Colleen Shipman speaks to PEOPLE for the first time about life after the attack', 17 | 'thumbnail': r're:^https?://.*\.jpg', 18 | 'duration': 246.318, 19 | 'timestamp': 1458720585, 20 | 'upload_date': '20160323', 21 | 'uploader_id': '416418724', 22 | }, 23 | 'params': { 24 | 'skip_download': True, 25 | }, 26 | 'add_ie': ['BrightcoveNew'], 27 | } 28 | 29 | def _real_extract(self, url): 30 | return self.url_result( 31 | 'http://players.brightcove.net/416418724/default_default/index.html?videoId=ref:%s' 32 | % self._match_id(url), 'BrightcoveNew') 33 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/restudy.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class RestudyIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:(?:www|portal)\.)?restudy\.dk/video/[^/]+/id/(?P[0-9]+)' 9 | _TESTS = [{ 10 | 'url': 'https://www.restudy.dk/video/play/id/1637', 11 | 'info_dict': { 12 | 'id': '1637', 13 | 'ext': 'flv', 14 | 'title': 'Leiden-frosteffekt', 15 | 'description': 'Denne video er et eksperiment med flydende kvælstof.', 16 | }, 17 | 'params': { 18 | # rtmp download 19 | 'skip_download': True, 20 | } 21 | }, { 22 | 'url': 'https://portal.restudy.dk/video/leiden-frosteffekt/id/1637', 23 | 'only_matching': True, 24 | }] 25 | 26 | def _real_extract(self, url): 27 | video_id = self._match_id(url) 28 | 29 | webpage = self._download_webpage(url, video_id) 30 | 31 | title = self._og_search_title(webpage).strip() 32 | description = self._og_search_description(webpage).strip() 33 | 34 | formats = self._extract_smil_formats( 35 | 'https://cdn.portal.restudy.dk/dynamic/themes/front/awsmedia/SmilDirectory/video_%s.xml' % video_id, 36 | video_id) 37 | self._sort_formats(formats) 38 | 39 | return { 40 | 'id': video_id, 41 | 'title': title, 42 | 'description': description, 43 | 'formats': formats, 44 | } 45 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/reverbnation.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..utils import ( 5 | qualities, 6 | str_or_none, 7 | ) 8 | 9 | 10 | class ReverbNationIE(InfoExtractor): 11 | _VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P\d+).*?$' 12 | _TESTS = [{ 13 | 'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa', 14 | 'md5': 'c0aaf339bcee189495fdf5a8c8ba8645', 15 | 'info_dict': { 16 | 'id': '16965047', 17 | 'ext': 'mp3', 18 | 'title': 'MONA LISA', 19 | 'uploader': 'ALKILADOS', 20 | 'uploader_id': '216429', 21 | 'thumbnail': r're:^https?://.*\.jpg', 22 | }, 23 | }] 24 | 25 | def _real_extract(self, url): 26 | song_id = self._match_id(url) 27 | 28 | api_res = 
self._download_json( 29 | 'https://api.reverbnation.com/song/%s' % song_id, 30 | song_id, 31 | note='Downloading information of song %s' % song_id 32 | ) 33 | 34 | THUMBNAILS = ('thumbnail', 'image') 35 | quality = qualities(THUMBNAILS) 36 | thumbnails = [] 37 | for thumb_key in THUMBNAILS: 38 | if api_res.get(thumb_key): 39 | thumbnails.append({ 40 | 'url': api_res[thumb_key], 41 | 'preference': quality(thumb_key) 42 | }) 43 | 44 | return { 45 | 'id': song_id, 46 | 'title': api_res['name'], 47 | 'url': api_res['url'], 48 | 'uploader': api_res.get('artist', {}).get('name'), 49 | 'uploader_id': str_or_none(api_res.get('artist', {}).get('id')), 50 | 'thumbnails': thumbnails, 51 | 'ext': 'mp3', 52 | 'vcodec': 'none', 53 | } 54 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/ro220.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..compat import compat_urllib_parse_unquote 5 | 6 | 7 | class Ro220IE(InfoExtractor): 8 | IE_NAME = '220.ro' 9 | _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)' 10 | _TEST = { 11 | 'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/', 12 | 'md5': '03af18b73a07b4088753930db7a34add', 13 | 'info_dict': { 14 | 'id': 'LYV6doKo7f', 15 | 'ext': 'mp4', 16 | 'title': 'Luati-le Banii sez 4 ep 1', 17 | 'description': r're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$', 18 | } 19 | } 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | 24 | webpage = self._download_webpage(url, video_id) 25 | url = compat_urllib_parse_unquote(self._search_regex( 26 | r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url')) 27 | title = self._og_search_title(webpage) 28 | description = self._og_search_description(webpage) 29 | thumbnail = self._og_search_thumbnail(webpage) 30 | 31 | formats = [{ 32 | 'format_id': 'sd', 33 | 'url': url, 34 | 'ext': 'mp4', 35 | }] 36 | 37 | return { 38 | 'id': video_id, 39 | 'formats': formats, 40 | 'title': title, 41 | 'description': description, 42 | 'thumbnail': thumbnail, 43 | } 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/rottentomatoes.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from .internetvideoarchive import InternetVideoArchiveIE 5 | 6 | 7 | class RottenTomatoesIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?rottentomatoes\.com/m/[^/]+/trailers/(?P\d+)' 9 | 10 | _TEST = { 11 | 'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/', 12 | 'info_dict': { 13 | 'id': '11028566', 14 | 'ext': 'mp4', 15 | 'title': 'Toy Story 3', 16 | 'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.', 17 | 'thumbnail': r're:^https?://.*\.jpg$', 18 | }, 19 | } 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | webpage = self._download_webpage(url, video_id) 24 | iva_id = self._search_regex(r'publishedid=(\d+)', webpage, 'internet video archive id') 25 | 26 | return { 27 | '_type': 'url_transparent', 28 | 'url': 
'http://video.internetvideoarchive.net/player/6/configuration.ashx?domain=www.videodetective.com&customerid=69249&playerid=641&publishedid=' + iva_id, 29 | 'ie_key': InternetVideoArchiveIE.ie_key(), 30 | 'id': video_id, 31 | 'title': self._og_search_title(webpage), 32 | } 33 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/rtvs.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class RTVSIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?rtvs\.sk/(?:radio|televizia)/archiv/\d+/(?P<id>\d+)' 9 | _TESTS = [{ 10 | # radio archive 11 | 'url': 'http://www.rtvs.sk/radio/archiv/11224/414872', 12 | 'md5': '134d5d6debdeddf8a5d761cbc9edacb8', 13 | 'info_dict': { 14 | 'id': '414872', 15 | 'ext': 'mp3', 16 | 'title': 'Ostrov pokladov 1 časť.mp3' 17 | }, 18 | 'params': { 19 | 'skip_download': True, 20 | } 21 | }, { 22 | # tv archive 23 | 'url': 'http://www.rtvs.sk/televizia/archiv/8249/63118', 24 | 'md5': '85e2c55cf988403b70cac24f5c086dc6', 25 | 'info_dict': { 26 | 'id': '63118', 27 | 'ext': 'mp4', 28 | 'title': 'Amaro Džives - Náš deň', 29 | 'description': 'Galavečer pri príležitosti Medzinárodného dňa Rómov.' 30 | }, 31 | 'params': { 32 | 'skip_download': True, 33 | } 34 | }] 35 | 36 | def _real_extract(self, url): 37 | video_id = self._match_id(url) 38 | 39 | webpage = self._download_webpage(url, video_id) 40 | 41 | playlist_url = self._search_regex( 42 | r'playlist["\']?\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 43 | 'playlist url', group='url') 44 | 45 | data = self._download_json( 46 | playlist_url, video_id, 'Downloading playlist')[0] 47 | return self._parse_jwplayer_data(data, video_id=video_id) 48 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/ruhd.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class RUHDIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?ruhd\.ru/play\.php\?vid=(?P<id>\d+)' 9 | _TEST = { 10 | 'url': 'http://www.ruhd.ru/play.php?vid=207', 11 | 'md5': 'd1a9ec4edf8598e3fbd92bb16072ba83', 12 | 'info_dict': { 13 | 'id': '207', 14 | 'ext': 'divx', 15 | 'title': 'КОТ бааааам', 16 | 'description': 'классный кот)', 17 | 'thumbnail': r're:^http://.*\.jpg$', 18 | } 19 | } 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | webpage = self._download_webpage(url, video_id) 24 | 25 | video_url = self._html_search_regex( 26 | r'<param name="src" value="([^"]+)"', webpage, 'video url') 27 | title = self._html_search_regex( 28 | r'<title>([^<]+)&nbsp;&nbsp; RUHD\.ru - Видео Высокого качества №1 в России!</title>', 29 | webpage, 'title') 30 | description = self._html_search_regex( 31 | r'(?s)
    (.+?)', 32 | webpage, 'description', fatal=False) 33 | thumbnail = self._html_search_regex( 34 | r'.*)$' 13 | 14 | _TEST = { 15 | 'url': 'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=ssyoutube.com', 16 | 'info_dict': { 17 | 'id': 'UlVRAPW2WJY', 18 | 'ext': 'mp4', 19 | 'title': 'About Team Radical MMA | MMA Fighting', 20 | 'upload_date': '20120816', 21 | 'uploader': 'Howcast', 22 | 'uploader_id': 'Howcast', 23 | 'description': r're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*', 24 | }, 25 | 'params': { 26 | 'skip_download': True 27 | } 28 | } 29 | 30 | def _real_extract(self, url): 31 | mobj = re.match(self._VALID_URL, url) 32 | video_id = os.path.splitext(url.split('/')[-1])[0] 33 | 34 | return self.url_result(mobj.group('url'), video_id=video_id) 35 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/screencastomatic.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import js_to_json 6 | 7 | 8 | class ScreencastOMaticIE(InfoExtractor): 9 | _VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P[0-9a-zA-Z]+)' 10 | _TEST = { 11 | 'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl', 12 | 'md5': '483583cb80d92588f15ccbedd90f0c18', 13 | 'info_dict': { 14 | 'id': 'c2lD3BeOPl', 15 | 'ext': 'mp4', 16 | 'title': 'Welcome to 3-4 Philosophy @ DECV!', 17 | 'thumbnail': r're:^https?://.*\.jpg$', 18 | 'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.', 19 | 'duration': 369.163, 20 | } 21 | } 22 | 23 | def _real_extract(self, url): 24 | video_id = self._match_id(url) 25 | webpage = self._download_webpage(url, video_id) 26 | 27 | jwplayer_data = self._parse_json( 28 | self._search_regex( 29 | r"(?s)jwplayer\('mp4Player'\).setup\((\{.*?\})\);", webpage, 'setup code'), 30 | video_id, transform_source=js_to_json) 31 | 32 | info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False) 33 | info_dict.update({ 34 | 'title': self._og_search_title(webpage), 35 | 'description': self._og_search_description(webpage), 36 | }) 37 | return info_dict 38 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/skylinewebcams.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class SkylineWebcamsIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P[^/]+)\.html' 9 | _TEST = { 10 | 'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html', 11 | 'info_dict': { 12 | 'id': 'scalinata-piazza-di-spagna-barcaccia', 13 | 'ext': 'mp4', 14 | 'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 15 | 'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia', 16 | 'is_live': True, 17 | }, 18 | 'params': { 19 | 'skip_download': True, 20 | } 21 | } 22 | 23 | def _real_extract(self, url): 24 | video_id = self._match_id(url) 25 | 26 | webpage = self._download_webpage(url, video_id) 27 | 28 | stream_url = self._search_regex( 29 | 
r'(?:url|source)\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', webpage, 30 | 'stream url', group='url') 31 | 32 | title = self._og_search_title(webpage) 33 | description = self._og_search_description(webpage) 34 | 35 | return { 36 | 'id': video_id, 37 | 'url': stream_url, 38 | 'ext': 'mp4', 39 | 'title': self._live_title(title), 40 | 'description': description, 41 | 'is_live': True, 42 | } 43 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/sonyliv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import smuggle_url 6 | 7 | 8 | class SonyLIVIE(InfoExtractor): 9 | _VALID_URL = r'https?://(?:www\.)?sonyliv\.com/details/[^/]+/(?P<id>\d+)' 10 | _TESTS = [{ 11 | 'url': "http://www.sonyliv.com/details/episodes/5024612095001/Ep.-1---Achaari-Cheese-Toast---Bachelor's-Delight", 12 | 'info_dict': { 13 | 'title': "Ep. 1 - Achaari Cheese Toast - Bachelor's Delight", 14 | 'id': 'ref:5024612095001', 15 | 'ext': 'mp4', 16 | 'upload_date': '20170923', 17 | 'description': 'md5:7f28509a148d5be9d0782b4d5106410d', 18 | 'uploader_id': '5182475815001', 19 | 'timestamp': 1506200547, 20 | }, 21 | 'params': { 22 | 'skip_download': True, 23 | }, 24 | 'add_ie': ['BrightcoveNew'], 25 | }, { 26 | 'url': 'http://www.sonyliv.com/details/full%20movie/4951168986001/Sei-Raat-(Bangla)', 27 | 'only_matching': True, 28 | }] 29 | 30 | # BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4338955589001/default_default/index.html?videoId=%s' 31 | BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5182475815001/default_default/index.html?videoId=ref:%s' 32 | 33 | def _real_extract(self, url): 34 | brightcove_id = self._match_id(url) 35 | return self.url_result( 36 | smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, { 37 | 'geo_countries': ['IN'], 38 | 'referrer': url, 39 | }), 40 | 'BrightcoveNew', brightcove_id) 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/spiegeltv.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from .nexx import NexxIE 5 | 6 | 7 | class SpiegeltvIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/videos/(?P<id>\d+)' 9 | _TEST = { 10 | 'url': 'http://www.spiegel.tv/videos/161681-flug-mh370/', 11 | 'only_matching': True, 12 | } 13 | 14 | def _real_extract(self, url): 15 | return self.url_result( 16 | 'https://api.nexx.cloud/v3/748/videos/byid/%s' 17 | % self._match_id(url), ie=NexxIE.ie_key()) 18 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/streetvoice.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..compat import compat_str 6 | from ..utils import unified_strdate 7 | 8 | 9 | class StreetVoiceIE(InfoExtractor): 10 | _VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)' 11 | _TESTS = [{ 12 | 'url': 'http://streetvoice.com/skippylu/songs/94440/', 13 | 'md5': '15974627fc01a29e492c98593c2fd472', 14 | 'info_dict': { 15 | 'id': '94440', 16 | 'ext': 'mp3', 17 | 'title': '輸', 18 | 'description': 'Crispy脆樂團 - 輸', 19 | 'thumbnail': r're:^https?://.*\.jpg$', 20 | 
'duration': 260, 21 | 'upload_date': '20091018', 22 | 'uploader': 'Crispy脆樂團', 23 | 'uploader_id': '627810', 24 | } 25 | }, { 26 | 'url': 'http://tw.streetvoice.com/skippylu/songs/94440/', 27 | 'only_matching': True, 28 | }] 29 | 30 | def _real_extract(self, url): 31 | song_id = self._match_id(url) 32 | 33 | song = self._download_json( 34 | 'https://streetvoice.com/api/v1/public/song/%s/' % song_id, song_id, data=b'') 35 | 36 | title = song['name'] 37 | author = song['user']['nickname'] 38 | 39 | return { 40 | 'id': song_id, 41 | 'url': song['file'], 42 | 'title': title, 43 | 'description': '%s - %s' % (author, title), 44 | 'thumbnail': self._proto_relative_url(song.get('image'), 'http:'), 45 | 'duration': song.get('length'), 46 | 'upload_date': unified_strdate(song.get('created_at')), 47 | 'uploader': author, 48 | 'uploader_id': compat_str(song['user']['id']), 49 | } 50 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/stretchinternet.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..utils import int_or_none 5 | 6 | 7 | class StretchInternetIE(InfoExtractor): 8 | _VALID_URL = r'https?://portal\.stretchinternet\.com/[^/]+/(?:portal|full)\.htm\?.*?\beventId=(?P\d+)' 9 | _TEST = { 10 | 'url': 'https://portal.stretchinternet.com/umary/portal.htm?eventId=573272&streamType=video', 11 | 'info_dict': { 12 | 'id': '573272', 13 | 'ext': 'mp4', 14 | 'title': 'University of Mary Wrestling vs. Upper Iowa', 15 | 'timestamp': 1575668361, 16 | 'upload_date': '20191206', 17 | } 18 | } 19 | 20 | def _real_extract(self, url): 21 | video_id = self._match_id(url) 22 | 23 | event = self._download_json( 24 | 'https://api.stretchinternet.com/trinity/event/tcg/' + video_id, 25 | video_id)[0] 26 | 27 | return { 28 | 'id': video_id, 29 | 'title': event['title'], 30 | 'timestamp': int_or_none(event.get('dateCreated'), 1000), 31 | 'url': 'https://' + event['media'][0]['url'], 32 | } 33 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/sztvhu.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class SztvHuIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P[0-9]+)' 9 | _TEST = { 10 | 'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', 11 | 'md5': 'a6df607b11fb07d0e9f2ad94613375cb', 12 | 'info_dict': { 13 | 'id': '20130909', 14 | 'ext': 'mp4', 15 | 'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren', 16 | 'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. 
A PET...', 17 | }, 18 | } 19 | 20 | def _real_extract(self, url): 21 | video_id = self._match_id(url) 22 | webpage = self._download_webpage(url, video_id) 23 | video_file = self._search_regex( 24 | r'file: "...:(.*?)",', webpage, 'video file') 25 | title = self._html_search_regex( 26 | r'', 30 | webpage, 'video description', fatal=False) 31 | thumbnail = self._og_search_thumbnail(webpage) 32 | 33 | video_url = 'http://media.sztv.hu/vod/' + video_file 34 | 35 | return { 36 | 'id': video_id, 37 | 'url': video_url, 38 | 'title': title, 39 | 'description': description, 40 | 'thumbnail': thumbnail, 41 | } 42 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/tastytrade.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from .ooyala import OoyalaIE 5 | 6 | 7 | class TastyTradeIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?tastytrade\.com/tt/shows/[^/]+/episodes/(?P[^/?#&]+)' 9 | 10 | _TESTS = [{ 11 | 'url': 'https://www.tastytrade.com/tt/shows/market-measures/episodes/correlation-in-short-volatility-06-28-2017', 12 | 'info_dict': { 13 | 'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM', 14 | 'ext': 'mp4', 15 | 'title': 'A History of Teaming', 16 | 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', 17 | 'duration': 422.255, 18 | }, 19 | 'params': { 20 | 'skip_download': True, 21 | }, 22 | 'add_ie': ['Ooyala'], 23 | }, { 24 | 'url': 'https://www.tastytrade.com/tt/shows/daily-dose/episodes/daily-dose-06-30-2017', 25 | 'only_matching': True, 26 | }] 27 | 28 | def _real_extract(self, url): 29 | display_id = self._match_id(url) 30 | webpage = self._download_webpage(url, display_id) 31 | 32 | ooyala_code = self._search_regex( 33 | r'data-media-id=(["\'])(?P(?:(?!\1).)+)\1', 34 | webpage, 'ooyala code', group='code') 35 | 36 | info = self._search_json_ld(webpage, display_id, fatal=False) 37 | info.update({ 38 | '_type': 'url_transparent', 39 | 'ie_key': OoyalaIE.ie_key(), 40 | 'url': 'ooyala:%s' % ooyala_code, 41 | 'display_id': display_id, 42 | }) 43 | return info 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/teachingchannel.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class TeachingChannelIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?teachingchannel\.org/videos?/(?P[^/?&#]+)' 8 | 9 | _TEST = { 10 | 'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution', 11 | 'info_dict': { 12 | 'id': '3swwlzkT', 13 | 'ext': 'mp4', 14 | 'title': 'A History of Teaming', 15 | 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', 16 | 'duration': 422, 17 | 'upload_date': '20170316', 18 | 'timestamp': 1489691297, 19 | }, 20 | 'params': { 21 | 'skip_download': True, 22 | }, 23 | 'add_ie': ['JWPlatform'], 24 | } 25 | 26 | def _real_extract(self, url): 27 | display_id = self._match_id(url) 28 | webpage = self._download_webpage(url, display_id) 29 | mid = self._search_regex( 30 | r'(?:data-mid=["\']|id=["\']jw-video-player-)([a-zA-Z0-9]{8})', 31 | webpage, 'media id') 32 | 33 | return self.url_result('jwplatform:' + mid, 'JWPlatform', mid) 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/thescene.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | from ..compat import compat_urlparse 6 | 7 | 8 | class TheSceneIE(InfoExtractor): 9 | _VALID_URL = r'https?://thescene\.com/watch/[^/]+/(?P[^/#?]+)' 10 | 11 | _TEST = { 12 | 'url': 'https://thescene.com/watch/vogue/narciso-rodriguez-spring-2013-ready-to-wear', 13 | 'info_dict': { 14 | 'id': '520e8faac2b4c00e3c6e5f43', 15 | 'ext': 'mp4', 16 | 'title': 'Narciso Rodriguez: Spring 2013 Ready-to-Wear', 17 | 'display_id': 'narciso-rodriguez-spring-2013-ready-to-wear', 18 | 'duration': 127, 19 | 'series': 'Style.com Fashion Shows', 20 | 'season': 'Ready To Wear Spring 2013', 21 | 'tags': list, 22 | 'categories': list, 23 | 'upload_date': '20120913', 24 | 'timestamp': 1347512400, 25 | 'uploader': 'vogue', 26 | }, 27 | } 28 | 29 | def _real_extract(self, url): 30 | display_id = self._match_id(url) 31 | 32 | webpage = self._download_webpage(url, display_id) 33 | 34 | player_url = compat_urlparse.urljoin( 35 | url, 36 | self._html_search_regex( 37 | r'id=\'js-player-script\'[^>]+src=\'(.+?)\'', webpage, 'player url')) 38 | 39 | return { 40 | '_type': 'url_transparent', 41 | 'display_id': display_id, 42 | 'url': player_url, 43 | 'ie_key': 'CondeNast', 44 | } 45 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/thestar.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class TheStarIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?thestar\.com/(?:[^/]+/)*(?P.+)\.html' 9 | _TEST = { 10 | 'url': 'http://www.thestar.com/life/2016/02/01/mankind-why-this-woman-started-a-men-s-skincare-line.html', 11 | 'md5': '2c62dd4db2027e35579fefb97a8b6554', 12 | 'info_dict': { 13 | 'id': '4732393888001', 14 | 'ext': 'mp4', 15 | 'title': 'Mankind: Why this woman started a men\'s skin care line', 16 | 'description': 'Robert Cribb talks to Young Lee, the founder of Uncle Peter\'s MAN.', 17 | 'uploader_id': '794267642001', 18 | 'timestamp': 1454353482, 19 | 'upload_date': '20160201', 20 | }, 21 | 'params': { 22 | # m3u8 download 23 | 'skip_download': True, 24 | } 25 | } 26 | BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/794267642001/default_default/index.html?videoId=%s' 27 | 28 | def _real_extract(self, url): 29 | display_id = self._match_id(url) 30 | webpage = self._download_webpage(url, display_id) 31 | brightcove_id = self._search_regex( 32 | r'mainartBrightcoveVideoId["\']?\s*:\s*["\']?(\d+)', 33 | webpage, 'brightcove id') 34 | return self.url_result( 35 | self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 36 | 'BrightcoveNew', brightcove_id) 37 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/thesun.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import re 4 | 5 | from .common import InfoExtractor 6 | from ..utils import extract_attributes 7 | 8 | 9 | class TheSunIE(InfoExtractor): 10 | _VALID_URL = r'https://(?:www\.)?thesun\.co\.uk/[^/]+/(?P\d+)' 11 | _TEST = { 12 | 'url': 'https://www.thesun.co.uk/tvandshowbiz/2261604/orlando-bloom-and-katy-perry-post-adorable-instagram-video-together-celebrating-thanksgiving-after-split-rumours/', 13 | 'info_dict': { 14 | 'id': '2261604', 15 | 
'title': 'md5:cba22f48bad9218b64d5bbe0e16afddf', 16 | }, 17 | 'playlist_count': 2, 18 | } 19 | BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' 20 | 21 | def _real_extract(self, url): 22 | article_id = self._match_id(url) 23 | 24 | webpage = self._download_webpage(url, article_id) 25 | 26 | entries = [] 27 | for video in re.findall( 28 | r'<video[^>]+data-video-id-pending=[^>]+>', 29 | webpage): 30 | attrs = extract_attributes(video) 31 | video_id = attrs['data-video-id-pending'] 32 | account_id = attrs.get('data-account', '5067014667001') 33 | entries.append(self.url_result( 34 | self.BRIGHTCOVE_URL_TEMPLATE % (account_id, video_id), 35 | 'BrightcoveNew', video_id)) 36 | 37 | return self.playlist_result( 38 | entries, article_id, self._og_search_title(webpage, fatal=False)) 39 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/thisamericanlife.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class ThisAmericanLifeIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)' 8 | _TESTS = [{ 9 | 'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one', 10 | 'md5': '8f7d2da8926298fdfca2ee37764c11ce', 11 | 'info_dict': { 12 | 'id': '487', 13 | 'ext': 'm4a', 14 | 'title': '487: Harper High School, Part One', 15 | 'description': 'md5:ee40bdf3fb96174a9027f76dbecea655', 16 | 'thumbnail': r're:^https?://.*\.jpg$', 17 | }, 18 | }, { 19 | 'url': 'http://www.thisamericanlife.org/play_full.php?play=487', 20 | 'only_matching': True, 21 | }] 22 | 23 | def _real_extract(self, url): 24 | video_id = self._match_id(url) 25 | 26 | webpage = self._download_webpage( 27 | 'http://www.thisamericanlife.org/radio-archives/episode/%s' % video_id, video_id) 28 | 29 | return { 30 | 'id': video_id, 31 | 'url': 'http://stream.thisamericanlife.org/{0}/stream/{0}_64k.m3u8'.format(video_id), 32 | 'protocol': 'm3u8_native', 33 | 'ext': 'm4a', 34 | 'acodec': 'aac', 35 | 'vcodec': 'none', 36 | 'abr': 64, 37 | 'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True), 38 | 'description': self._html_search_meta(r'description', webpage, 'description'), 39 | 'thumbnail': self._og_search_thumbnail(webpage), 40 | } 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/trunews.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class TruNewsIE(InfoExtractor): 7 | _VALID_URL = r'https?://(?:www\.)?trunews\.com/stream/(?P<id>[^/?#&]+)' 8 | _TEST = { 9 | 'url': 'https://www.trunews.com/stream/will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech', 10 | 'info_dict': { 11 | 'id': '5c5a21e65d3c196e1c0020cc', 12 | 'display_id': 'will-democrats-stage-a-circus-during-president-trump-s-state-of-the-union-speech', 13 | 'ext': 'mp4', 14 | 'title': "Will Democrats Stage a Circus During President Trump's State of the Union Speech?", 15 | 'description': 'md5:c583b72147cc92cf21f56a31aff7a670', 16 | 'duration': 3685, 17 | 'timestamp': 1549411440, 18 | 'upload_date': '20190206', 19 | }, 20 | 'add_ie': ['Zype'], 21 | } 22 | _ZYPE_TEMPL = 
'https://player.zype.com/embed/%s.js?api_key=X5XnahkjCwJrT_l5zUqypnaLEObotyvtUKJWWlONxDoHVjP8vqxlArLV8llxMbyt' 23 | 24 | def _real_extract(self, url): 25 | display_id = self._match_id(url) 26 | 27 | zype_id = self._download_json( 28 | 'https://api.zype.com/videos', display_id, query={ 29 | 'app_key': 'PUVKp9WgGUb3-JUw6EqafLx8tFVP6VKZTWbUOR-HOm__g4fNDt1bCsm_LgYf_k9H', 30 | 'per_page': 1, 31 | 'active': 'true', 32 | 'friendly_title': display_id, 33 | })['response'][0]['_id'] 34 | return self.url_result(self._ZYPE_TEMPL % zype_id, 'Zype', zype_id) 35 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/tvland.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .spike import ParamountNetworkIE 5 | 6 | # TODO: Remove - Reason not used anymore - Service moved to youtube 7 | 8 | 9 | class TVLandIE(ParamountNetworkIE): 10 | IE_NAME = 'tvland.com' 11 | _VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P[^/?#.]+)' 12 | _FEED_URL = 'http://www.tvland.com/feeds/mrss/' 13 | _TESTS = [{ 14 | # Geo-restricted. Without a proxy metadata are still there. With a 15 | # proxy it redirects to http://m.tvland.com/app/ 16 | 'url': 'https://www.tvland.com/episodes/s04pzf/everybody-loves-raymond-the-dog-season-1-ep-19', 17 | 'info_dict': { 18 | 'description': 'md5:84928e7a8ad6649371fbf5da5e1ad75a', 19 | 'title': 'The Dog', 20 | }, 21 | 'playlist_mincount': 5, 22 | }, { 23 | 'url': 'https://www.tvland.com/video-clips/4n87f2/younger-a-first-look-at-younger-season-6', 24 | 'md5': 'e2c6389401cf485df26c79c247b08713', 25 | 'info_dict': { 26 | 'id': '891f7d3c-5b5b-4753-b879-b7ba1a601757', 27 | 'ext': 'mp4', 28 | 'title': 'Younger|April 30, 2019|6|NO-EPISODE#|A First Look at Younger Season 6', 29 | 'description': 'md5:595ea74578d3a888ae878dfd1c7d4ab2', 30 | 'upload_date': '20190430', 31 | 'timestamp': 1556658000, 32 | }, 33 | 'params': { 34 | 'skip_download': True, 35 | }, 36 | }, { 37 | 'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301', 38 | 'only_matching': True, 39 | }] 40 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/tvnoe.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import ( 6 | clean_html, 7 | get_element_by_class, 8 | js_to_json, 9 | ) 10 | 11 | 12 | class TVNoeIE(InfoExtractor): 13 | _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/video/(?P[0-9]+)' 14 | _TEST = { 15 | 'url': 'http://www.tvnoe.cz/video/10362', 16 | 'md5': 'aee983f279aab96ec45ab6e2abb3c2ca', 17 | 'info_dict': { 18 | 'id': '10362', 19 | 'ext': 'mp4', 20 | 'series': 'Noční univerzita', 21 | 'title': 'prof. Tomáš Halík, Th.D. 
- Návrat náboženství a střet civilizací', 22 | 'description': 'md5:f337bae384e1a531a52c55ebc50fff41', 23 | } 24 | } 25 | 26 | def _real_extract(self, url): 27 | video_id = self._match_id(url) 28 | webpage = self._download_webpage(url, video_id) 29 | 30 | iframe_url = self._search_regex( 31 | r'<iframe[^>]+src="([^"]+)"', webpage, 'iframe URL') 32 | 33 | ifs_page = self._download_webpage(iframe_url, video_id) 34 | jwplayer_data = self._find_jwplayer_data( 35 | ifs_page, video_id, transform_source=js_to_json) 36 | info_dict = self._parse_jwplayer_data( 37 | jwplayer_data, video_id, require_title=False, base_url=iframe_url) 38 | 39 | info_dict.update({ 40 | 'id': video_id, 41 | 'title': clean_html(get_element_by_class( 42 | 'field-name-field-podnazev', webpage)), 43 | 'description': clean_html(get_element_by_class( 44 | 'field-name-body', webpage)), 45 | 'series': clean_html(get_element_by_class('title', webpage)) 46 | }) 47 | 48 | return info_dict 49 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/ufctv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .imggaming import ImgGamingBaseIE 5 | 6 | 7 | class UFCTVIE(ImgGamingBaseIE): 8 | _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'(?:(?:app|www)\.)?(?:ufc\.tv|(?:ufc)?fightpass\.com)|ufcfightpass\.img(?:dge|gaming)\.com' 9 | _NETRC_MACHINE = 'ufctv' 10 | _REALM = 'ufc' 11 | 12 | 13 | class UFCArabiaIE(ImgGamingBaseIE): 14 | _VALID_URL = ImgGamingBaseIE._VALID_URL_TEMPL % r'(?:(?:app|www)\.)?ufcarabia\.(?:ae|com)' 15 | _NETRC_MACHINE = 'ufcarabia' 16 | _REALM = 'admufc' 17 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/uktvplay.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class UKTVPlayIE(InfoExtractor): 8 | _VALID_URL = r'https?://uktvplay\.uktv\.co\.uk/.+?\?.*?\bvideo=(?P<id>\d+)' 9 | _TEST = { 10 | 'url': 'https://uktvplay.uktv.co.uk/shows/world-at-war/c/200/watch-online/?video=2117008346001', 11 | 'md5': '', 12 | 'info_dict': { 13 | 'id': '2117008346001', 14 | 'ext': 'mp4', 15 | 'title': 'Pincers', 16 | 'description': 'Pincers', 17 | 'uploader_id': '1242911124001', 18 | 'upload_date': '20130124', 19 | 'timestamp': 1359049267, 20 | }, 21 | 'params': { 22 | # m3u8 download 23 | 'skip_download': True, 24 | }, 25 | 'expected_warnings': ['Failed to download MPD manifest'] 26 | } 27 | BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1242911124001/H1xnMOqP_default/index.html?videoId=%s' 28 | 29 | def _real_extract(self, url): 30 | video_id = self._match_id(url) 31 | return self.url_result( 32 | self.BRIGHTCOVE_URL_TEMPLATE % video_id, 33 | 'BrightcoveNew', video_id) 34 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/unity.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from .youtube import YoutubeIE 5 | 6 | 7 | class UnityIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?unity3d\.com/learn/tutorials/(?:[^/]+/)*(?P<id>[^/?#&]+)' 9 | _TESTS = [{ 10 | 'url': 'https://unity3d.com/learn/tutorials/topics/animation/animate-anything-mecanim', 11 | 'info_dict': { 12 | 'id': 'jWuNtik0C8E', 13 | 
'ext': 'mp4', 14 | 'title': 'Live Training 22nd September 2014 - Animate Anything', 15 | 'description': 'md5:e54913114bd45a554c56cdde7669636e', 16 | 'duration': 2893, 17 | 'uploader': 'Unity', 18 | 'uploader_id': 'Unity3D', 19 | 'upload_date': '20140926', 20 | } 21 | }, { 22 | 'url': 'https://unity3d.com/learn/tutorials/projects/2d-ufo-tutorial/following-player-camera?playlist=25844', 23 | 'only_matching': True, 24 | }] 25 | 26 | def _real_extract(self, url): 27 | video_id = self._match_id(url) 28 | webpage = self._download_webpage(url, video_id) 29 | youtube_id = self._search_regex( 30 | r'data-video-id="([_0-9a-zA-Z-]+)"', 31 | webpage, 'youtube ID') 32 | return self.url_result(youtube_id, ie=YoutubeIE.ie_key(), video_id=video_id) 33 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/usanetwork.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .nbc import NBCIE 5 | 6 | 7 | class USANetworkIE(NBCIE): 8 | _VALID_URL = r'https?(?P://(?:www\.)?usanetwork\.com/(?:[^/]+/videos?|movies?)/(?:[^/]+/)?(?P\d+))' 9 | _TESTS = [{ 10 | 'url': 'https://www.usanetwork.com/peacock-trailers/video/intelligence-trailer/4185302', 11 | 'info_dict': { 12 | 'id': '4185302', 13 | 'ext': 'mp4', 14 | 'title': 'Intelligence (Trailer)', 15 | 'description': 'A maverick NSA agent enlists the help of a junior systems analyst in a workplace power grab.', 16 | 'upload_date': '20200715', 17 | 'timestamp': 1594785600, 18 | 'uploader': 'NBCU-MPAT', 19 | }, 20 | 'params': { 21 | # m3u8 download 22 | 'skip_download': True, 23 | }, 24 | }] 25 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/vh1.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .mtv import MTVServicesInfoExtractor 5 | 6 | # TODO Remove - Reason: Outdated Site 7 | 8 | 9 | class VH1IE(MTVServicesInfoExtractor): 10 | IE_NAME = 'vh1.com' 11 | _FEED_URL = 'http://www.vh1.com/feeds/mrss/' 12 | _TESTS = [{ 13 | 'url': 'http://www.vh1.com/episodes/0umwpq/hip-hop-squares-kent-jones-vs-nick-young-season-1-ep-120', 14 | 'info_dict': { 15 | 'title': 'Kent Jones vs. Nick Young', 16 | 'description': 'Come to Play. Stay to Party. 
With Mike Epps, TIP, O’Shea Jackson Jr., T-Pain, Tisha Campbell-Martin and more.', 17 | }, 18 | 'playlist_mincount': 4, 19 | }, { 20 | # Clip 21 | 'url': 'http://www.vh1.com/video-clips/t74mif/scared-famous-scared-famous-extended-preview', 22 | 'info_dict': { 23 | 'id': '0a50c2d2-a86b-4141-9565-911c7e2d0b92', 24 | 'ext': 'mp4', 25 | 'title': 'Scared Famous|October 9, 2017|1|NO-EPISODE#|Scared Famous + Extended Preview', 26 | 'description': 'md5:eff5551a274c473a29463de40f7b09da', 27 | 'upload_date': '20171009', 28 | 'timestamp': 1507574700, 29 | }, 30 | 'params': { 31 | # m3u8 download 32 | 'skip_download': True, 33 | }, 34 | }] 35 | 36 | _VALID_URL = r'https?://(?:www\.)?vh1\.com/(?:video-clips|episodes)/(?P[^/?#.]+)' 37 | 38 | def _real_extract(self, url): 39 | playlist_id = self._match_id(url) 40 | webpage = self._download_webpage(url, playlist_id) 41 | mgid = self._extract_triforce_mgid(webpage) 42 | videos_info = self._get_videos_info(mgid) 43 | return videos_info 44 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/videodetective.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from .internetvideoarchive import InternetVideoArchiveIE 5 | 6 | 7 | class VideoDetectiveIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?videodetective\.com/[^/]+/[^/]+/(?P\d+)' 9 | 10 | _TEST = { 11 | 'url': 'http://www.videodetective.com/movies/kick-ass-2/194487', 12 | 'info_dict': { 13 | 'id': '194487', 14 | 'ext': 'mp4', 15 | 'title': 'Kick-Ass 2', 16 | 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', 17 | }, 18 | 'params': { 19 | # m3u8 download 20 | 'skip_download': True, 21 | }, 22 | } 23 | 24 | def _real_extract(self, url): 25 | video_id = self._match_id(url) 26 | query = 'customerid=69249&publishedid=' + video_id 27 | return self.url_result( 28 | InternetVideoArchiveIE._build_json_url(query), 29 | ie=InternetVideoArchiveIE.ie_key()) 30 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/vodpl.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .onet import OnetBaseIE 5 | 6 | 7 | class VODPlIE(OnetBaseIE): 8 | _VALID_URL = r'https?://vod\.pl/(?:[^/]+/)+(?P[0-9a-zA-Z]+)' 9 | 10 | _TESTS = [{ 11 | 'url': 'https://vod.pl/filmy/chlopaki-nie-placza/3ep3jns', 12 | 'md5': 'a7dc3b2f7faa2421aefb0ecaabf7ec74', 13 | 'info_dict': { 14 | 'id': '3ep3jns', 15 | 'ext': 'mp4', 16 | 'title': 'Chłopaki nie płaczą', 17 | 'description': 'md5:f5f03b84712e55f5ac9f0a3f94445224', 18 | 'timestamp': 1463415154, 19 | 'duration': 5765, 20 | 'upload_date': '20160516', 21 | }, 22 | }, { 23 | 'url': 'https://vod.pl/seriale/belfer-na-planie-praca-kamery-online/2c10heh', 24 | 'only_matching': True, 25 | }] 26 | 27 | def _real_extract(self, url): 28 | video_id = self._match_id(url) 29 | webpage = self._download_webpage(url, video_id) 30 | info_dict = self._extract_from_id(self._search_mvp_id(webpage), webpage) 31 | info_dict['id'] = video_id 32 | return info_dict 33 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/vodplatform.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from 
..utils import unescapeHTML 6 | 7 | 8 | class VODPlatformIE(InfoExtractor): 9 | _VALID_URL = r'https?://(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/(?P[^/?#]+)' 10 | _TESTS = [{ 11 | # from http://www.lbcgroup.tv/watch/chapter/29143/52844/%D8%A7%D9%84%D9%86%D8%B5%D8%B1%D8%A9-%D9%81%D9%8A-%D8%B6%D9%8A%D8%A7%D9%81%D8%A9-%D8%A7%D9%84%D9%80-cnn/ar 12 | 'url': 'http://vod-platform.net/embed/RufMcytHDolTH1MuKHY9Fw', 13 | 'md5': '1db2b7249ce383d6be96499006e951fc', 14 | 'info_dict': { 15 | 'id': 'RufMcytHDolTH1MuKHY9Fw', 16 | 'ext': 'mp4', 17 | 'title': 'LBCi News_ النصرة في ضيافة الـ "سي.أن.أن"', 18 | } 19 | }, { 20 | 'url': 'http://embed.kwikmotion.com/embed/RufMcytHDolTH1MuKHY9Fw', 21 | 'only_matching': True, 22 | }] 23 | 24 | def _real_extract(self, url): 25 | video_id = self._match_id(url) 26 | webpage = self._download_webpage(url, video_id) 27 | 28 | title = unescapeHTML(self._og_search_title(webpage)) 29 | hidden_inputs = self._hidden_inputs(webpage) 30 | 31 | formats = self._extract_wowza_formats( 32 | hidden_inputs.get('HiddenmyhHlsLink') or hidden_inputs['HiddenmyDashLink'], video_id, skip_protocols=['f4m', 'smil']) 33 | self._sort_formats(formats) 34 | 35 | return { 36 | 'id': video_id, 37 | 'title': title, 38 | 'thumbnail': hidden_inputs.get('HiddenThumbnail') or self._og_search_thumbnail(webpage), 39 | 'formats': formats, 40 | } 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/weiqitv.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | 6 | 7 | class WeiqiTVIE(InfoExtractor): 8 | IE_DESC = 'WQTV' 9 | _VALID_URL = r'https?://(?:www\.)?weiqitv\.com/index/video_play\?videoId=(?P[A-Za-z0-9]+)' 10 | 11 | _TESTS = [{ 12 | 'url': 'http://www.weiqitv.com/index/video_play?videoId=53c744f09874f0e76a8b46f3', 13 | 'md5': '26450599afd64c513bc77030ad15db44', 14 | 'info_dict': { 15 | 'id': '53c744f09874f0e76a8b46f3', 16 | 'ext': 'mp4', 17 | 'title': '2013年度盘点', 18 | }, 19 | }, { 20 | 'url': 'http://www.weiqitv.com/index/video_play?videoId=567379a2d4c36cca518b4569', 21 | 'info_dict': { 22 | 'id': '567379a2d4c36cca518b4569', 23 | 'ext': 'mp4', 24 | 'title': '民国围棋史', 25 | }, 26 | }, { 27 | 'url': 'http://www.weiqitv.com/index/video_play?videoId=5430220a9874f088658b4567', 28 | 'info_dict': { 29 | 'id': '5430220a9874f088658b4567', 30 | 'ext': 'mp4', 31 | 'title': '二路托过的手段和运用', 32 | }, 33 | }] 34 | 35 | def _real_extract(self, url): 36 | media_id = self._match_id(url) 37 | page = self._download_webpage(url, media_id) 38 | 39 | info_json_str = self._search_regex( 40 | r'var\s+video\s*=\s*(.+});', page, 'info json str') 41 | info_json = self._parse_json(info_json_str, media_id) 42 | 43 | letvcloud_url = self._search_regex( 44 | r'var\s+letvurl\s*=\s*"([^"]+)', page, 'letvcloud url') 45 | 46 | return { 47 | '_type': 'url_transparent', 48 | 'ie_key': 'LetvCloud', 49 | 'url': letvcloud_url, 50 | 'title': info_json['name'], 51 | 'id': media_id, 52 | } 53 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/worldstarhiphop.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | 5 | 6 | class WorldStarHipHopIE(InfoExtractor): 7 | _VALID_URL = 
r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?.*?\bv=(?P<id>[^&]+)' 8 | _TESTS = [{ 9 | 'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO', 10 | 'md5': '9d04de741161603bf7071bbf4e883186', 11 | 'info_dict': { 12 | 'id': 'wshh6a7q1ny0G34ZwuIO', 13 | 'ext': 'mp4', 14 | 'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!' 15 | } 16 | }, { 17 | 'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO', 18 | 'only_matching': True, 19 | }] 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | webpage = self._download_webpage(url, video_id) 24 | 25 | entries = self._parse_html5_media_entries(url, webpage, video_id) 26 | 27 | if not entries: 28 | return self.url_result(url, 'Generic') 29 | 30 | title = self._html_search_regex( 31 | [r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>', 32 | r'<span[^>]+class="tc-sp-pinned-title">(.*)</span>'], 33 | webpage, 'title') 34 | 35 | info = entries[0] 36 | info.update({ 37 | 'id': video_id, 38 | 'title': title, 39 | }) 40 | return info 41 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/xbef.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .common import InfoExtractor 4 | from ..compat import compat_urllib_parse_unquote 5 | 6 | 7 | class XBefIE(InfoExtractor): 8 | _VALID_URL = r'https?://(?:www\.)?xbef\.com/video/(?P<id>[0-9]+)' 9 | _TEST = { 10 | 'url': 'http://xbef.com/video/5119-glamourous-lesbians-smoking-drinking-and-fucking', 11 | 'md5': 'a478b565baff61634a98f5e5338be995', 12 | 'info_dict': { 13 | 'id': '5119', 14 | 'ext': 'mp4', 15 | 'title': 'md5:7358a9faef8b7b57acda7c04816f170e', 16 | 'age_limit': 18, 17 | 'thumbnail': r're:^http://.*\.jpg', 18 | } 19 | } 20 | 21 | def _real_extract(self, url): 22 | video_id = self._match_id(url) 23 | webpage = self._download_webpage(url, video_id) 24 | 25 | title = self._html_search_regex( 26 | r'<h1[^>]*>(.*?)</h1>', webpage, 'title') 27 | 28 | config_url_enc = self._download_webpage( 29 | 'http://xbef.com/Main/GetVideoURLEncoded/%s' % video_id, video_id, 30 | note='Retrieving config URL') 31 | config_url = compat_urllib_parse_unquote(config_url_enc) 32 | config = self._download_xml( 33 | config_url, video_id, note='Retrieving config') 34 | 35 | video_url = config.find('./file').text 36 | thumbnail = config.find('./image').text 37 | 38 | return { 39 | 'id': video_id, 40 | 'url': video_url, 41 | 'title': title, 42 | 'thumbnail': thumbnail, 43 | 'age_limit': 18, 44 | } 45 | -------------------------------------------------------------------------------- /youtube_dlc/extractor/yourupload.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | from __future__ import unicode_literals 3 | 4 | from .common import InfoExtractor 5 | from ..utils import urljoin 6 | 7 | 8 | class YourUploadIE(InfoExtractor): 9 | _VALID_URL = r'https?://(?:www\.)?(?:yourupload\.com/(?:watch|embed)|embed\.yourupload\.com)/(?P<id>[A-Za-z0-9]+)' 10 | _TESTS = [{ 11 | 'url': 'http://yourupload.com/watch/14i14h', 12 | 'md5': '5e2c63385454c557f97c4c4131a393cd', 13 | 'info_dict': { 14 | 'id': '14i14h', 15 | 'ext': 'mp4', 16 | 'title': 'BigBuckBunny_320x180.mp4', 17 | 'thumbnail': r're:^https?://.*\.jpe?g', 18 | } 19 | }, { 20 | 'url': 'http://www.yourupload.com/embed/14i14h', 21 | 'only_matching': True, 22 | }, { 23 | 'url': 'http://embed.yourupload.com/14i14h', 24 | 'only_matching': True, 25 | }] 26 | 27 | def _real_extract(self, url): 28 | video_id = self._match_id(url) 29 | 30 | embed_url = 'http://www.yourupload.com/embed/%s' % video_id 31 | 32 | webpage = self._download_webpage(embed_url, video_id) 33 | 34 | title = self._og_search_title(webpage) 35 | video_url = urljoin(embed_url, self._og_search_video_url(webpage)) 36 | thumbnail = self._og_search_thumbnail(webpage, default=None) 37 | 38 | return { 39 | 'id': video_id, 40 | 'title': title, 41 | 'url': video_url, 42 | 'thumbnail': thumbnail, 43 | 'http_headers': { 44 | 'Referer': embed_url, 45 | }, 46 | } 47 | -------------------------------------------------------------------------------- /youtube_dlc/postprocessor/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from .embedthumbnail import EmbedThumbnailPP 4 | from .ffmpeg import ( 5 | FFmpegPostProcessor, 6 | FFmpegEmbedSubtitlePP, 7 | FFmpegExtractAudioPP, 8 | FFmpegFixupStretchedPP, 9 | FFmpegFixupM3u8PP, 10 | FFmpegFixupM4aPP, 11 | FFmpegMergerPP, 12 | FFmpegMetadataPP, 13 | FFmpegVideoConvertorPP, 14 | FFmpegVideoRemuxerPP, 15 | FFmpegSubtitlesConvertorPP, 16 | ) 17 | from .xattrpp import XAttrMetadataPP 18 | from .execafterdownload import ExecAfterDownloadPP 19 | from .metadatafromtitle import MetadataFromTitlePP 20 | 21 | 22 | def get_postprocessor(key): 23 | return globals()[key + 'PP'] 24 | 25 | 26 | __all__ = [ 27 | 'EmbedThumbnailPP', 28 | 'ExecAfterDownloadPP', 29 | 'FFmpegEmbedSubtitlePP', 30 | 'FFmpegExtractAudioPP', 31 | 'FFmpegFixupM3u8PP', 32 | 'FFmpegFixupM4aPP', 33 | 'FFmpegFixupStretchedPP', 34 | 'FFmpegMergerPP', 35 | 'FFmpegMetadataPP', 36 | 'FFmpegPostProcessor', 37 | 'FFmpegSubtitlesConvertorPP', 38 | 'FFmpegVideoConvertorPP', 39 | 'FFmpegVideoRemuxerPP', 40 | 'MetadataFromTitlePP', 41 | 'XAttrMetadataPP', 42 | ] 43 | -------------------------------------------------------------------------------- /youtube_dlc/postprocessor/execafterdownload.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import subprocess 4 | 5 | from .common import PostProcessor 6 | from ..compat import compat_shlex_quote 7 | from ..utils import ( 8 | encodeArgument, 9 | PostProcessingError, 10 | ) 11 | 12 | 13 | class ExecAfterDownloadPP(PostProcessor): 14 | def __init__(self, downloader, exec_cmd): 15 | super(ExecAfterDownloadPP, self).__init__(downloader) 16 | self.exec_cmd = exec_cmd 17 | 18 | def run(self, information): 19 | cmd = self.exec_cmd 20 | if '{}' not in cmd: 21 | cmd += ' {}' 22 | 23 | cmd = cmd.replace('{}', compat_shlex_quote(information['filepath'])) 24 | 25 | self._downloader.to_screen('[exec] Executing command: %s' % cmd) 26 | retCode = subprocess.call(encodeArgument(cmd), shell=True) 27 | if retCode != 0: 28 | raise PostProcessingError( 29 | 'Command returned error code %d' % retCode) 30 | 31 | return [], information 32 | -------------------------------------------------------------------------------- /youtube_dlc/version.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | __version__ = '2020.11.11-2' 4 | --------------------------------------------------------------------------------