
Convert all tabs to 4 spaces (PEP8)

Philipp Hagemeister 2012-11-28 02:04:46 +01:00
parent 40b35b4aa6
commit 59ae15a507
9 changed files with 4841 additions and 4841 deletions
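
The change itself is purely mechanical: every leading tab becomes four spaces, which is why the additions and deletions balance exactly at 4841 lines. One way such a conversion can be scripted is sketched below; this is an illustration, not the tool the author used, and the in-place rewrite, the retab name, and the fixed width of 4 are all assumptions:

import re
import sys

def retab(path, width=4):
    # Hypothetical helper: expand only *leading* tabs to spaces, so tab
    # characters appearing after the indentation (e.g. inside string
    # literals) are left untouched.
    with open(path) as f:
        lines = f.readlines()
    with open(path, 'w') as f:
        for line in lines:
            indent = re.match(r'[ \t]*', line).group(0)
            f.write(indent.replace('\t', ' ' * width) + line[len(indent):])

if __name__ == '__main__':
    for path in sys.argv[1:]:
        retab(path)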


@@ -13,186 +13,186 @@ from youtube_dl.InfoExtractors import CollegeHumorIE, XNXXIE
class DownloadTest(unittest.TestCase):
    PARAMETERS_FILE = "test/parameters.json"
    #calculated with md5sum:
    #md5sum (GNU coreutils) 8.19
    YOUTUBE_SIZE = 1993883
    YOUTUBE_URL = "http://www.youtube.com/watch?v=BaW_jenozKc"
    YOUTUBE_FILE = "BaW_jenozKc.mp4"

    DAILYMOTION_MD5 = "d363a50e9eb4f22ce90d08d15695bb47"
    DAILYMOTION_URL = "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech"
    DAILYMOTION_FILE = "x33vw9.mp4"

    METACAFE_SIZE = 5754305
    METACAFE_URL = "http://www.metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/"
    METACAFE_FILE = "_aUehQsCQtM.flv"

    BLIP_MD5 = "93c24d2f4e0782af13b8a7606ea97ba7"
    BLIP_URL = "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352"
    BLIP_FILE = "5779306.m4v"

    XVIDEO_MD5 = "1ab4dedc01f771cb2a65e91caa801aaf"
    XVIDEO_URL = "http://www.xvideos.com/video939581/funny_porns_by_s_-1"
    XVIDEO_FILE = "939581.flv"

    VIMEO_MD5 = "1ab4dedc01f771cb2a65e91caa801aaf"
    VIMEO_URL = "http://vimeo.com/14160053"
    VIMEO_FILE = ""

    VIMEO2_MD5 = ""
    VIMEO2_URL = "http://player.vimeo.com/video/47019590"
    VIMEO2_FILE = ""

    SOUNDCLOUD_MD5 = "ce3775768ebb6432fa8495d446a078ed"
    SOUNDCLOUD_URL = "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy"
    SOUNDCLOUD_FILE = "n6FLbx6ZzMiu.mp3"

    STANDFORD_MD5 = "22c8206291368c4e2c9c1a307f0ea0f4"
    STANDFORD_URL = "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100"
    STANDFORD_FILE = "PracticalUnix_intro-environment.mp4"

    COLLEGEHUMOR_MD5 = ""
    COLLEGEHUMOR_URL = "http://www.collegehumor.com/video/6830834/mitt-romney-style-gangnam-style-parody"
    COLLEGEHUMOR_FILE = ""

    XNXX_MD5 = "5f0469c8d1dfd1bc38c8e6deb5e0a21d"
    XNXX_URL = "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_"
    XNXX_FILE = "1135332.flv"

    def test_youtube(self):
        #let's download a file from youtube
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(YoutubeIE())
        fd.download([DownloadTest.YOUTUBE_URL])
        self.assertTrue(os.path.exists(DownloadTest.YOUTUBE_FILE))
        self.assertEqual(os.path.getsize(DownloadTest.YOUTUBE_FILE), DownloadTest.YOUTUBE_SIZE)

    def test_dailymotion(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(DailymotionIE())
        fd.download([DownloadTest.DAILYMOTION_URL])
        self.assertTrue(os.path.exists(DownloadTest.DAILYMOTION_FILE))
        md5_down_file = md5_for_file(DownloadTest.DAILYMOTION_FILE)
        self.assertEqual(md5_down_file, DownloadTest.DAILYMOTION_MD5)

    def test_metacafe(self):
        #this emulate a skip,to be 2.6 compatible
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(MetacafeIE())
        fd.add_info_extractor(YoutubeIE())
        fd.download([DownloadTest.METACAFE_URL])
        self.assertTrue(os.path.exists(DownloadTest.METACAFE_FILE))
        self.assertEqual(os.path.getsize(DownloadTest.METACAFE_FILE), DownloadTest.METACAFE_SIZE)

    def test_blip(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(BlipTVIE())
        fd.download([DownloadTest.BLIP_URL])
        self.assertTrue(os.path.exists(DownloadTest.BLIP_FILE))
        md5_down_file = md5_for_file(DownloadTest.BLIP_FILE)
        self.assertEqual(md5_down_file, DownloadTest.BLIP_MD5)

    def test_xvideo(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(XVideosIE())
        fd.download([DownloadTest.XVIDEO_URL])
        self.assertTrue(os.path.exists(DownloadTest.XVIDEO_FILE))
        md5_down_file = md5_for_file(DownloadTest.XVIDEO_FILE)
        self.assertEqual(md5_down_file, DownloadTest.XVIDEO_MD5)

    def test_vimeo(self):
        #skipped for the moment produce an error
        return
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(VimeoIE())
        fd.download([DownloadTest.VIMEO_URL])
        self.assertTrue(os.path.exists(DownloadTest.VIMEO_FILE))
        md5_down_file = md5_for_file(DownloadTest.VIMEO_FILE)
        self.assertEqual(md5_down_file, DownloadTest.VIMEO_MD5)

    def test_vimeo2(self):
        #skipped for the moment produce an error
        return
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(VimeoIE())
        fd.download([DownloadTest.VIMEO2_URL])
        self.assertTrue(os.path.exists(DownloadTest.VIMEO2_FILE))
        md5_down_file = md5_for_file(DownloadTest.VIMEO2_FILE)
        self.assertEqual(md5_down_file, DownloadTest.VIMEO2_MD5)

    def test_soundcloud(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(SoundcloudIE())
        fd.download([DownloadTest.SOUNDCLOUD_URL])
        self.assertTrue(os.path.exists(DownloadTest.SOUNDCLOUD_FILE))
        md5_down_file = md5_for_file(DownloadTest.SOUNDCLOUD_FILE)
        self.assertEqual(md5_down_file, DownloadTest.SOUNDCLOUD_MD5)

    def test_standford(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(StanfordOpenClassroomIE())
        fd.download([DownloadTest.STANDFORD_URL])
        self.assertTrue(os.path.exists(DownloadTest.STANDFORD_FILE))
        md5_down_file = md5_for_file(DownloadTest.STANDFORD_FILE)
        self.assertEqual(md5_down_file, DownloadTest.STANDFORD_MD5)

    def test_collegehumor(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(CollegeHumorIE())
        fd.download([DownloadTest.COLLEGEHUMOR_URL])
        self.assertTrue(os.path.exists(DownloadTest.COLLEGEHUMOR_FILE))
        md5_down_file = md5_for_file(DownloadTest.COLLEGEHUMOR_FILE)
        self.assertEqual(md5_down_file, DownloadTest.COLLEGEHUMOR_MD5)

    def test_xnxx(self):
        with open(DownloadTest.PARAMETERS_FILE) as f:
            fd = FileDownloader(json.load(f))
        fd.add_info_extractor(XNXXIE())
        fd.download([DownloadTest.XNXX_URL])
        self.assertTrue(os.path.exists(DownloadTest.XNXX_FILE))
        md5_down_file = md5_for_file(DownloadTest.XNXX_FILE)
        self.assertEqual(md5_down_file, DownloadTest.XNXX_MD5)

    def tearDown(self):
        if os.path.exists(DownloadTest.YOUTUBE_FILE):
            os.remove(DownloadTest.YOUTUBE_FILE)
        if os.path.exists(DownloadTest.DAILYMOTION_FILE):
            os.remove(DownloadTest.DAILYMOTION_FILE)
        if os.path.exists(DownloadTest.METACAFE_FILE):
            os.remove(DownloadTest.METACAFE_FILE)
        if os.path.exists(DownloadTest.BLIP_FILE):
            os.remove(DownloadTest.BLIP_FILE)
        if os.path.exists(DownloadTest.XVIDEO_FILE):
            os.remove(DownloadTest.XVIDEO_FILE)
        if os.path.exists(DownloadTest.VIMEO_FILE):
            os.remove(DownloadTest.VIMEO_FILE)
        if os.path.exists(DownloadTest.SOUNDCLOUD_FILE):
            os.remove(DownloadTest.SOUNDCLOUD_FILE)
        if os.path.exists(DownloadTest.STANDFORD_FILE):
            os.remove(DownloadTest.STANDFORD_FILE)
        if os.path.exists(DownloadTest.COLLEGEHUMOR_FILE):
            os.remove(DownloadTest.COLLEGEHUMOR_FILE)
        if os.path.exists(DownloadTest.XNXX_FILE):
            os.remove(DownloadTest.XNXX_FILE)

def md5_for_file(filename, block_size=2**20):
    with open(filename) as f:
        md5 = hashlib.md5()
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
        return md5.hexdigest()
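
One thing to note about md5_for_file above: it opens the file in text mode, which is fine on Python 2, where read() returns raw bytes, but would break on Python 3, where hashlib only accepts bytes (and text-mode decoding of a video file would itself fail). A minimal sketch of a version that behaves the same on both; the _binary suffix is just to distinguish it here:

import hashlib

def md5_for_file_binary(filename, block_size=2**20):
    # 'rb' ensures read() yields bytes, which md5.update() requires on
    # Python 3; on Python 2 the behaviour is unchanged.
    md5 = hashlib.md5()
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()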


@@ -5,9 +5,9 @@ import os.path
import subprocess

class TestImport(unittest.TestCase):
    def test_import(self):
        rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)

if __name__ == '__main__':
    unittest.main()


@@ -14,79 +14,79 @@ from youtube_dl.utils import unescapeHTML
from youtube_dl.utils import orderedSet

if sys.version_info < (3,0):
    _compat_str = lambda b: b.decode('unicode-escape')
else:
    _compat_str = lambda s: s

class TestUtil(unittest.TestCase):
    def test_timeconvert(self):
        self.assertTrue(timeconvert('') is None)
        self.assertTrue(timeconvert('bougrg') is None)

    def test_sanitize_filename(self):
        self.assertEqual(sanitize_filename('abc'), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
        self.assertEqual(sanitize_filename('123'), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de'))
        self.assertFalse('/' in sanitize_filename('abc/de///'))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
        self.assertEqual('yes no', sanitize_filename('yes? no'))
        self.assertEqual('this - that', sanitize_filename('this: that'))

        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
        aumlaut = _compat_str('\xe4')
        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
        self.assertEqual(sanitize_filename(tests), tests)

        forbidden = '"\0\\/'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc))

    def test_sanitize_filename_restricted(self):
        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
        self.assertEqual(sanitize_filename('123', restricted=True), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))

        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
        self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '') # No empty filename

        forbidden = '"\0\\/&!: \'\t\n'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))

        # Handle a common case more neatly
        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
        # .. but make sure the file name is never empty
        self.assertTrue(sanitize_filename('-', restricted=True) != '')
        self.assertTrue(sanitize_filename(':', restricted=True) != '')

    def test_ordered_set(self):
        self.assertEqual(orderedSet([1,1,2,3,4,4,5,6,7,3,5]), [1,2,3,4,5,6,7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        #keep the list ordered
        self.assertEqual(orderedSet([135,1,1,1]), [135,1])

    def test_unescape_html(self):
        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))

if __name__ == '__main__':
    unittest.main()
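
Taken together, the two sanitize_filename tests pin down its contract: by default, path separators and shell-hostile characters are replaced ('abc/de' becomes 'abc_de', 'this: that' becomes 'this - that') while Unicode passes through untouched; with restricted=True, spaces and non-ASCII characters are also rewritten, and the result is guaranteed non-empty. An illustrative interpreter session, with values taken directly from the assertions above:

>>> from youtube_dl.utils import sanitize_filename
>>> sanitize_filename('this: that')
'this - that'
>>> sanitize_filename('yes? no', restricted=True)
'yes_no'
>>> sanitize_filename(u'\u5927\u58f0\u5e26 - Song', restricted=True)
'Song'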


@@ -10,707 +10,707 @@ import sys
import time

if os.name == 'nt':
    import ctypes

from utils import *


class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. As, given a video URL, the downloader doesn't know how to
    extract all the needed information, task that InfoExtractors do, it
    has to pass the URL to one of them.

    For this, file downloader objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the file downloader handles it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    asks the FileDownloader to process the video information, possibly
    downloading the video.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The FileDownloader also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    usenetrc:          Use netrc for authentication instead.
    quiet:             Do not print messages to stdout.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    simulate:          Do not download the video files.
    format:            Video format code.
    format_limit:      Highest quality format to try.
    outtmpl:           Template for output names.
    restrictfilenames: Do not allow "&" and spaces in file names
    ignoreerrors:      Do not stop on download errors.
    ratelimit:         Download speed limit, in bytes/sec.
    nooverwrites:      Prevent overwriting files.
    retries:           Number of times to retry for HTTP error 5xx
    buffersize:        Size of download buffer in bytes.
    noresizebuffer:    Do not automatically resize the download buffer.
    continuedl:        Try to continue downloads if possible.
    noprogress:        Do not print the progress bar.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    nopart:            Do not use temporary .part files.
    updatetime:        Use the Last-modified header to set output file timestamps.
    writedescription: Write the video description to a .description file
    writeinfojson:    Write the video description to a .info.json file
    writesubtitles:   Write the video subtitles to a .srt file
    subtitleslang:    Language of the subtitles to download
    """
    params = None
    _ies = []
    _pps = []
    _download_retcode = None
    _num_downloads = None
    _screen_file = None

    def __init__(self, params):
        """Create a FileDownloader object with the given options."""
        self._ies = []
        self._pps = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self.params = params

        if '%(stitle)s' in self.params['outtmpl']:
            self.to_stderr(u'WARNING: %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')

    @staticmethod
    def format_bytes(bytes):
        if bytes is None:
            return 'N/A'
        if type(bytes) is str:
            bytes = float(bytes)
        if bytes == 0.0:
            exponent = 0
        else:
            exponent = int(math.log(bytes, 1024.0))
        suffix = 'bkMGTPEZY'[exponent]
        converted = float(bytes) / float(1024 ** exponent)
        return '%.2f%s' % (converted, suffix)
    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))

    @staticmethod
    def calc_eta(start, now, total, current):
        if total is None:
            return '--:--'
        dif = now - start
        if current == 0 or dif < 0.001: # One millisecond
            return '--:--'
        rate = float(current) / dif
        eta = int((float(total) - float(current)) / rate)
        (eta_mins, eta_secs) = divmod(eta, 60)
        if eta_mins > 99:
            return '--:--'
        return '%02d:%02d' % (eta_mins, eta_secs)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001: # One millisecond
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))
    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        ie.set_downloader(self)

    def add_post_processor(self, pp):
        """Add a PostProcessor object to the end of the chain."""
        self._pps.append(pp)
        pp.set_downloader(self)

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        assert type(message) == type(u'')
        if not self.params.get('quiet', False):
            terminator = [u'\n', u''][skip_eol]
            output = message + terminator
            if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
                output = output.encode(preferredencoding(), 'ignore')
            self._screen_file.write(output)
            self._screen_file.flush()

    def to_stderr(self, message):
        """Print message to stderr."""
        assert type(message) == type(u'')
        output = message + u'\n'
        if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
            output = output.encode(preferredencoding())
        sys.stderr.write(output)

    def to_cons_title(self, message):
        """Set console/terminal window title to message."""
        if not self.params.get('consoletitle', False):
            return
        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
            # c_wchar_p() might not be necessary if `message` is
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))

    def fixed_template(self):
        """Checks if the output template is fixed."""
        return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None)
    def trouble(self, message=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.
        """
        if message is not None:
            self.to_stderr(message)
        if not self.params.get('ignoreerrors', False):
            raise DownloadError(message)
        self._download_retcode = 1

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.trouble(u'ERROR: unable to rename file')

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        try:
            os.utime(filename, (time.time(), filetime))
        except:
            pass
        return filetime
    def report_writedescription(self, descfn):
        """ Report that the description file is being written """
        self.to_screen(u'[info] Writing video description to: ' + descfn)

    def report_writesubtitles(self, srtfn):
        """ Report that the subtitles file is being written """
        self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)

    def report_writeinfojson(self, infofn):
        """ Report that the metadata file has been written """
        self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn)

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
                (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except (UnicodeEncodeError) as err:
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def report_finish(self):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self.to_screen(u'')

    def increment_downloads(self):
        """Increment the ordinal that assigns a number to each file."""
        self._num_downloads += 1
    def prepare_filename(self, info_dict):
        """Generate the output filename."""
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            template_dict['autonumber'] = u'%05d' % self._num_downloads

            template_dict = dict((key, u'NA' if val is None else val) for key, val in template_dict.items())
            template_dict = dict((k, sanitize_filename(compat_str(v), self.params.get('restrictfilenames'))) for k,v in template_dict.items())

            filename = self.params['outtmpl'] % template_dict
            return filename
        except (ValueError, KeyError) as err:
            self.trouble(u'ERROR: invalid system charset or erroneous output template')
            return None

    def _match_entry(self, info_dict):
        """ Returns None iff the file should be downloaded """

        title = info_dict['title']
        matchtitle = self.params.get('matchtitle', False)
        if matchtitle:
            matchtitle = matchtitle.decode('utf8')
            if not re.search(matchtitle, title, re.IGNORECASE):
                return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
        rejecttitle = self.params.get('rejecttitle', False)
        if rejecttitle:
            rejecttitle = rejecttitle.decode('utf8')
            if re.search(rejecttitle, title, re.IGNORECASE):
                return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        return None
    def process_info(self, info_dict):
        """Process a single dictionary returned by an InfoExtractor."""

        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']

        if not 'format' in info_dict:
            info_dict['format'] = info_dict['ext']

        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen(u'[download] ' + reason)
            return

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads > int(max_downloads):
                raise MaxDownloadsReached()

        filename = self.prepare_filename(info_dict)

        # Forced printings
        if self.params.get('forcetitle', False):
            compat_print(info_dict['title'])
        if self.params.get('forceurl', False):
            compat_print(info_dict['url'])
        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
            compat_print(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and 'description' in info_dict:
            compat_print(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            compat_print(filename)
        if self.params.get('forceformat', False):
            compat_print(info_dict['format'])

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn != '' and not os.path.exists(dn): # dn is already encoded
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
            return

        if self.params.get('writedescription', False):
            try:
                descfn = filename + u'.description'
                self.report_writedescription(descfn)
                descfile = open(encodeFilename(descfn), 'wb')
                try:
                    descfile.write(info_dict['description'].encode('utf-8'))
                finally:
                    descfile.close()
            except (OSError, IOError):
                self.trouble(u'ERROR: Cannot write description file ' + descfn)
                return

        if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            try:
                srtfn = filename.rsplit('.', 1)[0] + u'.srt'
                self.report_writesubtitles(srtfn)
                srtfile = open(encodeFilename(srtfn), 'wb')
                try:
                    srtfile.write(info_dict['subtitles'].encode('utf-8'))
                finally:
                    srtfile.close()
            except (OSError, IOError):
                self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
                return

        if self.params.get('writeinfojson', False):
            infofn = filename + u'.info.json'
            self.report_writeinfojson(infofn)
            try:
                json.dump
            except (NameError,AttributeError):
                self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.')
                return
            try:
                infof = open(encodeFilename(infofn), 'wb')
                try:
                    json_info_dict = dict((k,v) for k,v in info_dict.iteritems() if not k in ('urlhandle',))
                    json.dump(json_info_dict, infof)
                finally:
                    infof.close()
            except (OSError, IOError):
                self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
                return

        if not self.params.get('skip_download', False):
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
                success = True
            else:
                try:
                    success = self._do_download(filename, info_dict)
                except (OSError, IOError) as err:
                    raise UnavailableVideoError
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.trouble(u'ERROR: unable to download video data: %s' % str(err))
                    return
                except (ContentTooShortError, ) as err:
                    self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                    return

            if success:
                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.trouble(u'ERROR: postprocessing: %s' % str(err))
                    return
    def download(self, url_list):
        """Download a given list of URLs."""
        if len(url_list) > 1 and self.fixed_template():
            raise SameFileError(self.params['outtmpl'])

        for url in url_list:
            suitable_found = False
            for ie in self._ies:
                # Go to next InfoExtractor if not suitable
                if not ie.suitable(url):
                    continue

                # Warn if the _WORKING attribute is False
                if not ie.working():
                    self.trouble(u'WARNING: the program functionality for this site has been marked as broken, '
                                 u'and will probably not work. If you want to go on, use the -i option.')

                # Suitable InfoExtractor found
                suitable_found = True

                # Extract information from URL and process it
                videos = ie.extract(url)
                for video in videos or []:
                    video['extractor'] = ie.IE_NAME
                    try:
                        self.increment_downloads()
                        self.process_info(video)
                    except UnavailableVideoError:
                        self.trouble(u'\nERROR: unable to download video')

                # Suitable InfoExtractor had been found; go to next URL
                break

            if not suitable_found:
                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)

        return self._download_retcode

    def post_process(self, filename, ie_info):
        """Run the postprocessing chain on the given file."""
        info = dict(ie_info)
        info['filepath'] = filename
        for pp in self._pps:
            info = pp.run(info)
            if info is None:
                break
    def _download_with_rtmpdump(self, filename, url, player_url):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        # Check for rtmpdump first
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=open(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
        if self.params.get('verbose', False):
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
            except ImportError:
                shell_quote = repr
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
        retval = subprocess.call(args)
        while retval == 2 or retval == 1:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
            time.sleep(5.0)  # This seems to be needed
            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == 2 and cursize > 1024:
                self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = 0
                break
        if retval == 0:
            self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
            self.try_rename(tmpfilename, filename)
            return True
        else:
            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
            return False
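# The argument lists above are built with a Python 2 era idiom: indexing a
# two-element list with a boolean, [[], extra][cond], which appends `extra`
# only when `cond` is true (False == 0, True == 1). A small self-contained
# illustration with placeholder values:
player_url = None
args = ['rtmpdump', '-q'] + [[], ['-W', str(player_url)]][player_url is not None]
assert args == ['rtmpdump', '-q']

player_url = 'http://example.com/player.swf'
args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None]
assert args == ['rtmpdump', '-q', '-W', player_url]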
    def _do_download(self, filename, info_dict):
        url = info_dict['url']
        player_url = info_dict.get('player_url', None)

        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            return True

        # Attempt to download using rtmpdump
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url, player_url)

        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                if count == 0 and 'urlhandle' in info_dict:
                    data = info_dict['urlhandle']
                else:
                    data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.trouble(u'ERROR: giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
        data_len_str = self.format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
                return False
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
            else:
                percent_str = self.calc_percent(byte_counter, data_len)
                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
                self.report_progress(percent_str, data_len_str, speed_str, eta_str)

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            self.trouble(u'\nERROR: Did not get any data blocks')
            return False
        stream.close()
        self.report_finish()
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        return True
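# A standalone sketch of the resume decision made in the 416 handler above:
# if the server-reported Content-Length is within 100 bytes of what is
# already on disk, the file is treated as fully downloaded (the issue #175
# workaround); otherwise the partial file is discarded and the download
# restarts. The byte counts below are invented for illustration.
def already_complete(content_length, resume_len):
    return (content_length is not None and
            resume_len - 100 < int(content_length) < resume_len + 100)

assert already_complete('1000050', 1000000)      # within tolerance -> done
assert not already_complete('2000000', 1000000)  # size mismatch -> restart
assert not already_complete(None, 1000000)       # unknown length -> restart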


@@ -16,3553 +16,3553 @@ from utils import *
class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the FileDownloader. The FileDownloader processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.

    The dictionaries must include the following fields:

    id:             Video identifier.
    url:            Final video URL.
    uploader:       Nickname of the video uploader, unescaped.
    upload_date:    Video upload date (YYYYMMDD).
    title:          Video title, unescaped.
    ext:            Video filename extension.

    The following fields are optional:

    format:         The video format, defaults to ext (used for --get-format)
    thumbnail:      Full URL to a video thumbnail image.
    description:    One-line video description.
    player_url:     SWF Player URL (used for rtmpdump).
    subtitles:      The .srt file contents.
    urlhandle:      [internal] The urlHandle to be used to download the file,
                    like returned by urllib.request.urlopen

    The fields should all be Unicode strings.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    _real_extract() must return a *list* of information dictionaries as
    described above.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    def suitable(self, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(self._VALID_URL, url) is not None

    def working(self):
        """Getter method for _WORKING."""
        return self._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        self.initialize()
        return self._real_extract(url)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass
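# A minimal, hypothetical extractor following the contract documented in the
# docstring above: define _VALID_URL, redefine _real_extract(), and return a
# *list* of info dictionaries. ExampleIE, its URL pattern, and the media URL
# are invented for illustration and do not correspond to a real site.
import re

class ExampleIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?example\.com/video/(\w+)'
    IE_NAME = u'example'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(1)
        return [{
            'id': video_id,
            'url': u'http://example.com/media/%s.mp4' % video_id,
            'uploader': u'unknown',
            'upload_date': None,
            'title': u'Example video %s' % video_id,
            'ext': u'mp4',
        }]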
class YoutubeIE(InfoExtractor):
    """Information extractor for youtube.com."""

    _VALID_URL = r"""^
                     (
                         (?:https?://)?                                # http(s):// (optional)
                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                            tube\.majestyc\.net/)                      # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                   # handle anchor (#/) redirect urls
                         (?!view_play_list|my_playlists|artist|playlist)  # ignore playlist URLs
                         (?:                                           # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                        # v/ or embed/ or e/
                             |(?:                                      # or the v= param in all its forms
                                 (?:watch(?:_popup)?(?:\.php)?)?       # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                           # the params delimiter ? or # or #!
                                 (?:.+&)?                              # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         )?                                            # optional -> youtube.com/xxxx is OK
                     )?                                                # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]+)                                  # here is it! the YouTube video ID
                     (?(1).+)?                                         # if we found the ID, everything can follow
                     $"""
    _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    _NETRC_MACHINE = 'youtube'
    # Listed in order of quality
    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
    _video_extensions = {
        '13': '3gp',
        '17': 'mp4',
        '18': 'mp4',
        '22': 'mp4',
        '37': 'mp4',
        '38': 'video',  # You actually don't know if this will be MOV, AVI or whatever
        '43': 'webm',
        '44': 'webm',
        '45': 'webm',
        '46': 'webm',
    }
    _video_dimensions = {
        '5': '240x400',
        '6': '???',
        '13': '???',
        '17': '144x176',
        '18': '360x640',
        '22': '720x1280',
        '34': '360x640',
        '35': '480x854',
        '37': '1080x1920',
        '38': '3072x4096',
        '43': '360x640',
        '44': '480x854',
        '45': '720x1280',
        '46': '1080x1920',
    }
    IE_NAME = u'youtube'

    def suitable(self, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(self._VALID_URL, url, re.VERBOSE) is not None

    def report_lang(self):
        """Report attempt to set language."""
        self._downloader.to_screen(u'[youtube] Setting language')

    def report_login(self):
        """Report attempt to log in."""
        self._downloader.to_screen(u'[youtube] Logging in')

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self._downloader.to_screen(u'[youtube] Confirming age')

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)

    def report_video_subtitles_download(self, video_id):
        """Report attempt to download video subtitles."""
        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self._downloader.to_screen(u'[youtube] RTMP download detected')

    def _closed_captions_xml_to_srt(self, xml_string):
        srt = ''
        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
        # TODO parse xml instead of regex
        for n, (start, dur_tag, dur, caption) in enumerate(texts):
            if not dur:
                dur = '4'
            start = float(start)
            end = start + float(dur)
            start = "%02i:%02i:%02i,%03i" % (start/(60*60), start/60%60, start%60, start%1*1000)
            end = "%02i:%02i:%02i,%03i" % (end/(60*60), end/60%60, end%60, end%1*1000)
            caption = unescapeHTML(caption)
            caption = unescapeHTML(caption)  # double cycle, intentional
            srt += str(n+1) + '\n'
            srt += start + ' --> ' + end + '\n'
            srt += caption + '\n\n'
        return srt
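# The "%02i:%02i:%02i,%03i" pattern above renders a float offset in seconds
# as an SRT timestamp (HH:MM:SS,mmm). A worked example with an invented value:
start = 3661.5
stamp = "%02i:%02i:%02i,%03i" % (start / (60 * 60), start / 60 % 60,
                                 start % 60, start % 1 * 1000)
assert stamp == "01:01:01,500"  # 1 hour, 1 minute, 1.5 seconds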
    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))

    def _real_initialize(self):
        if self._downloader is None:
            return

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
                return

        # Set language
        request = compat_urllib_request.Request(self._LANG_URL)
        try:
            self.report_lang()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
            return

        # No authentication to be performed
        if username is None:
            return

        # Log in
        login_form = {
            'current_form': 'loginForm',
            'next': '/',
            'action_login': 'Log In',
            'username': username,
            'password': password,
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read()
            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
                return
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
            return

        # Confirm age
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
        try:
            self.report_age_confirmation()
            age_results = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
            return

    def _real_extract(self, url):
        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')

        # Extract video id from URL
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(2)

        # Get video webpage
        self.report_video_webpage_download(video_id)
        request = compat_urllib_request.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
        try:
            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                              % (video_id, el_type))
            request = compat_urllib_request.Request(video_info_url)
            try:
                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
                video_info = compat_parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
                return
        if 'token' not in video_info:
            if 'reason' in video_info:
                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
            else:
                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
            return

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            self._downloader.trouble(u'ERROR: "rental" videos not supported')
            return

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # title
        if 'title' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])

        # thumbnail image
        if 'thumbnail_url' not in video_info:
            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
            video_thumbnail = ''
        else:  # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = None
        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
            for expression in format_expressions:
                try:
                    upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
                except ValueError:
                    pass

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            video_description = ''

        # closed captions
        video_subtitles = None
        if self._downloader.params.get('writesubtitles', False):
            try:
                self.report_video_subtitles_download(video_id)
                request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
                try:
                    srt_list = compat_urllib_request.urlopen(request).read()
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
                srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
                srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
                if not srt_lang_list:
                    raise Trouble(u'WARNING: video has no closed captions')
                if self._downloader.params.get('subtitleslang', False):
                    srt_lang = self._downloader.params.get('subtitleslang')
                elif 'en' in srt_lang_list:
                    srt_lang = 'en'
                else:
                    srt_lang = list(srt_lang_list.keys())[0]
                if srt_lang not in srt_lang_list:
                    raise Trouble(u'WARNING: no closed captions found in the specified language')
                request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
                try:
                    srt_xml = compat_urllib_request.urlopen(request).read()
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
                if not srt_xml:
                    raise Trouble(u'WARNING: unable to download video subtitles')
                video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
            except Trouble as trouble:
                self._downloader.trouble(trouble[0])

        if 'length_seconds' not in video_info:
            self._downloader.trouble(u'WARNING: unable to extract video duration')
            video_duration = ''
        else:
            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])

        # token
        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])

        # Decide which formats to download
        req_format = self._downloader.params.get('format', None)

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]
        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
            url_data = [compat_parse_qs(uds) for uds in url_data_strs]
            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)

            format_limit = self._downloader.params.get('format_limit', None)
            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
            if format_limit is not None and format_limit in available_formats:
                format_list = available_formats[available_formats.index(format_limit):]
            else:
                format_list = available_formats
            existing_formats = [x for x in format_list if x in url_map]
            if len(existing_formats) == 0:
                self._downloader.trouble(u'ERROR: no known formats available for video')
                return
            if self._downloader.params.get('listformats', None):
                self._print_formats(existing_formats)
                return
            if req_format is None or req_format == 'best':
                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])]  # Best quality
            elif req_format == 'worst':
                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])]  # worst quality
            elif req_format in ('-1', 'all'):
                video_url_list = [(f, url_map[f]) for f in existing_formats]  # All formats
            else:
                # Specific formats. We pick the first in a slash-delimited sequence.
                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
                req_formats = req_format.split('/')
                video_url_list = None
                for rf in req_formats:
                    if rf in url_map:
                        video_url_list = [(rf, url_map[rf])]
                        break
                if video_url_list is None:
                    self._downloader.trouble(u'ERROR: requested format not available')
                    return
        else:
            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
            return

        results = []
        for format_param, video_real_url in video_url_list:
            # Extension
            video_extension = self._video_extensions.get(format_param, 'flv')

            video_format = '{} - {}'.format(format_param if format_param else video_extension,
                                            self._video_dimensions.get(format_param, '???'))

            results.append({
                'id': video_id,
                'url': video_real_url,
                'uploader': video_uploader,
                'upload_date': upload_date,
                'title': video_title,
                'ext': video_extension,
                'format': video_format,
                'thumbnail': video_thumbnail,
                'description': video_description,
                'player_url': player_url,
                'subtitles': video_subtitles,
                'duration': video_duration
            })
        return results
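# A sketch of the slash-delimited format selection implemented above: a
# request like '-f 1/2/3/4' means "take the first of these itags the site
# actually offers". The itags and URLs below are made up for illustration.
url_map = {'2': 'http://example.com/fmt2', '4': 'http://example.com/fmt4'}
req_format = '1/2/3/4'

video_url_list = None
for rf in req_format.split('/'):
    if rf in url_map:
        video_url_list = [(rf, url_map[rf])]
        break

assert video_url_list == [('2', 'http://example.com/fmt2')]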
class MetacafeIE(InfoExtractor):
    """Information Extractor for metacafe.com."""

    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
    IE_NAME = u'metacafe'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_disclaimer(self):
        """Report disclaimer retrieval."""
        self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self._downloader.to_screen(u'[metacafe] Confirming age')

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)

    def _real_initialize(self):
        # Retrieve disclaimer
        request = compat_urllib_request.Request(self._DISCLAIMER)
        try:
            self.report_disclaimer()
            disclaimer = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
            return

        # Confirm age
        disclaimer_form = {
            'filters': '0',
            'submit': "Continue - I'm over 18",
        }
        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
        try:
            self.report_age_confirmation()
            disclaimer = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
            return

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(1)

        # Check if video comes from YouTube
        mobj2 = re.match(r'^yt-(.*)$', video_id)
        if mobj2 is not None:
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
            return

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
        if mobj is not None:
            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
            video_extension = mediaURL[-3:]

            # Extract gdaKey if available
            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
            if mobj is None:
                video_url = mediaURL
            else:
                gdaKey = mobj.group(1)
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
        else:
            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            vardict = compat_parse_qs(mobj.group(1))
            if 'mediaData' not in vardict:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            mediaURL = mobj.group(1).replace('\\/', '/')
            video_extension = mediaURL[-3:]
            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))

        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')

        mobj = re.search(r'submitter=(.*?);', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = mobj.group(1)

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader.decode('utf-8'),
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]
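# Metacafe IDs prefixed with "yt-" are YouTube pass-throughs: the branch
# above re-dispatches them to the YouTube extractor instead of scraping
# Metacafe. A quick illustration with an invented ID:
import re

mobj2 = re.match(r'^yt-(.*)$', 'yt-abcdefghijk')
assert mobj2 is not None
assert ('http://www.youtube.com/watch?v=%s' % mobj2.group(1)
        == 'http://www.youtube.com/watch?v=abcdefghijk')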


class DailymotionIE(InfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
    IE_NAME = u'dailymotion'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group(1).split('_')[0].split('?')[0]

        video_extension = 'mp4'

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        flashvars = compat_urllib_parse.unquote(mobj.group(1))
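        # Quality selection: the keys below are ordered best-first, and
        # Python's for/else only runs the else branch when the loop finishes
        # without hitting break, i.e. when none of the known keys is present.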
        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
            if key in flashvars:
                max_quality = key
                self._downloader.to_screen(u'[dailymotion] Using %s' % key)
                break
        else:
            self._downloader.trouble(u'ERROR: unable to extract video URL')
            return

        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video URL')
            return

        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')

        # TODO: support choosing qualities

        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = unescapeHTML(mobj.group('title').decode('utf-8'))

        video_uploader = None
        mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
        if mobj is None:
            # looking for the official user
            mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
            if mobj_official is None:
                self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
            else:
                video_uploader = mobj_official.group(1)
        else:
            video_uploader = mobj.group(1)

        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            # video_uploader may still be None when only a warning was issued above
            'uploader': video_uploader.decode('utf-8') if video_uploader else None,
            'upload_date': video_upload_date,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]
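
# A minimal usage sketch (the name fd is hypothetical; it stands in for the
# FileDownloader-style object providing the to_screen()/trouble() hooks used
# throughout these extractors):
#
#     ie = DailymotionIE()
#     ie.set_downloader(fd)
#     results = ie.extract('http://www.dailymotion.com/video/x33vw9')
#
# extract() delegates to _real_extract() and returns the list of info
# dictionaries built at the end of the method.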


class GoogleIE(InfoExtractor):
    """Information extractor for video.google.com."""

    _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
    IE_NAME = u'video.google'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        video_id = mobj.group(1)

        video_extension = 'mp4'

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract URL, uploader, and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r"download_url:'([^']+)'", webpage)
        if mobj is None:
            video_extension = 'flv'
            mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
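        # The URL found above is JavaScript-escaped: the literal backslash
        # sequences '\x3d' and '\x26' have to be turned back into the real
        # '=' and '&' characters before the URL is usable.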
        mediaURL = compat_urllib_parse.unquote(mobj.group(1))
        mediaURL = mediaURL.replace('\\x3d', '\x3d')
        mediaURL = mediaURL.replace('\\x26', '\x26')

        video_url = mediaURL

        mobj = re.search(r'<title>(.*)</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')

        # Extract video description
        mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video description')
            return
        video_description = mobj.group(1).decode('utf-8')
        if not video_description:
            video_description = 'No description available.'

        # Extract video thumbnail
        if self._downloader.params.get('forcethumbnail', False):
            request = compat_urllib_request.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
            try:
                webpage = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
                return
            mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
                return
            video_thumbnail = mobj.group(1)
        else:   # we need something to pass to process_info
            video_thumbnail = ''

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]


class PhotobucketIE(InfoExtractor):
    """Information extractor for photobucket.com."""

    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
    IE_NAME = u'photobucket'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        video_id = mobj.group(1)

        video_extension = 'flv'

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract URL, uploader, and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        mediaURL = compat_urllib_parse.unquote(mobj.group(1))

        video_url = mediaURL

        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')

        video_uploader = mobj.group(2).decode('utf-8')

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]


class YahooIE(InfoExtractor):
    """Information extractor for video.yahoo.com."""

    # _VALID_URL matches all Yahoo! Video URLs
    # _VPAGE_URL matches only the extractable '/watch/' URLs
    _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
    _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
    IE_NAME = u'video.yahoo'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)

    def _real_extract(self, url, new_video=True):
        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        video_id = mobj.group(2)
        video_extension = 'flv'

        # Rewrite valid but non-extractable URLs as
        # extractable English language /watch/ URLs
        if re.match(self._VPAGE_URL, url) is None:
            request = compat_urllib_request.Request(url)
            try:
                webpage = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
                return

            mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: Unable to extract id field')
                return
            yahoo_id = mobj.group(1)

            mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: Unable to extract vid field')
                return
            yahoo_vid = mobj.group(1)

            url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
            return self._real_extract(url, new_video=False)

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = mobj.group(1).decode('utf-8')

        mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video uploader')
            return
        # the uploader name is in the second group; the first one only
        # matches the 'people' / 'profile' path segment
        video_uploader = mobj.group(2).decode('utf-8')

        # Extract video thumbnail
        mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
            return
        video_thumbnail = mobj.group(1).decode('utf-8')

        # Extract video description
        mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video description')
            return
        video_description = mobj.group(1).decode('utf-8')
        if not video_description:
            video_description = 'No description available.'

        # Extract video height and width
        mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video height')
            return
        yv_video_height = mobj.group(1)

        mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video width')
            return
        yv_video_width = mobj.group(1)

        # Retrieve video playlist to extract media URL
        # I'm not completely sure what all these options are, but we
        # seem to need most of them, otherwise the server sends a 401.
        yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
        yv_bitrate = '700'  # according to Wikipedia this is hard-coded
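        # The request assembled below looks roughly like:
        #   http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?
        #     node_id=<id>&tech=flash&mode=playlist&lg=...&bitrate=700&vidH=...&vidW=...&swf=as3&...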
        request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
                                                '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
                                                '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract media URL from playlist XML
        mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Unable to extract media URL')
            return
        video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
        video_url = unescapeHTML(video_url)

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
            'thumbnail': video_thumbnail.decode('utf-8'),
            'description': video_description,
        }]


class VimeoIE(InfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
    IE_NAME = u'vimeo'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)

    def _real_extract(self, url, new_video=True):
        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        video_id = mobj.group(1)

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, std_headers)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
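        # The page embeds the player settings as a JavaScript object literal;
        # instead of parsing the HTML, the code slices out the JSON sitting
        # between the ' = {config:' marker and the ',assets:' key after it.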
        config = webpage.split(' = {config:')[1].split(',assets:')[0]
        try:
            config = json.loads(config)
        except:
            self._downloader.trouble(u'ERROR: unable to extract info section')
            return

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader
        video_uploader = config["video"]["owner"]["name"]

        # Extract video thumbnail
        video_thumbnail = config["video"]["thumbnail"]

        # Extract video description
        video_description = get_element_by_id("description", webpage.decode('utf8'))
        if video_description:
            video_description = clean_html(video_description)
        else:
            video_description = ''

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1)

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        # TODO bind to format param
        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
        files = {'hd': [], 'sd': [], 'other': []}
        for codec_name, codec_extension in codecs:
            if codec_name in config["video"]["files"]:
                if 'hd' in config["video"]["files"][codec_name]:
                    files['hd'].append((codec_name, codec_extension, 'hd'))
                elif 'sd' in config["video"]["files"][codec_name]:
                    files['sd'].append((codec_name, codec_extension, 'sd'))
                else:
                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))

        for quality in ('hd', 'sd', 'other'):
            if len(files[quality]) > 0:
                video_quality = files[quality][0][2]
                video_codec = files[quality][0][0]
                video_extension = files[quality][0][1]
                self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                break
        else:
            self._downloader.trouble(u'ERROR: no known codec found')
            return

        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                    % (video_id, sig, timestamp, video_quality, video_codec.upper())

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'title': video_title,
            'ext': video_extension,
            'thumbnail': video_thumbnail,
            'description': video_description,
        }]


class ArteTvIE(InfoExtractor):
    """arte.tv information extractor."""

    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)

    def fetch_webpage(self, url):
        self._downloader.increment_downloads()
        request = compat_urllib_request.Request(url)
        try:
            self.report_download_webpage(url)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return
        except ValueError as err:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return
        return webpage

    def grep_webpage(self, url, regex, regexFlags, matchTuples):
        page = self.fetch_webpage(url)
        mobj = re.search(regex, page, regexFlags)
        info = {}

        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        for (i, key, err) in matchTuples:
            if mobj.group(i) is None:
                self._downloader.trouble(err)
                return
            else:
                info[key] = mobj.group(i)

        return info

    def extractLiveStream(self, url):
        video_lang = url.split('/')[-4]
        info = self.grep_webpage(
            url,
            r'src="(.*?/videothek_js.*?\.js)',
            0,
            [
                (1, 'url', u'ERROR: Invalid URL: %s' % url)
            ]
        )
        http_host = url.split('/')[2]
        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
        info = self.grep_webpage(
            next_url,
            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
                '(http://.*?\.swf).*?' +
                '(rtmp://.*?)\'',
            re.DOTALL,
            [
                (1, 'path', u'ERROR: could not extract video path: %s' % url),
                (2, 'player', u'ERROR: could not extract video player: %s' % url),
                (3, 'url', u'ERROR: could not extract video url: %s' % url)
            ]
        )
        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
        # NOTE: the assembled RTMP URL is built but never returned, so live
        # streams are located here without being handed back for download.
    def extractPlus7Stream(self, url):
        video_lang = url.split('/')[-3]
        info = self.grep_webpage(
            url,
            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
            0,
            [
                (1, 'url', u'ERROR: Invalid URL: %s' % url)
            ]
        )
        next_url = compat_urllib_parse.unquote(info.get('url'))
        info = self.grep_webpage(
            next_url,
            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
            0,
            [
                (1, 'url', u'ERROR: Could not find <video> tag: %s' % url)
            ]
        )
        next_url = compat_urllib_parse.unquote(info.get('url'))

        info = self.grep_webpage(
            next_url,
            r'<video id="(.*?)".*?>.*?' +
                '<name>(.*?)</name>.*?' +
                '<dateVideo>(.*?)</dateVideo>.*?' +
                '<url quality="hd">(.*?)</url>',
            re.DOTALL,
            [
                (1, 'id', u'ERROR: could not extract video id: %s' % url),
                (2, 'title', u'ERROR: could not extract video title: %s' % url),
                (3, 'date', u'ERROR: could not extract video date: %s' % url),
                (4, 'url', u'ERROR: could not extract video url: %s' % url)
            ]
        )

        return {
            'id': info.get('id'),
            'url': compat_urllib_parse.unquote(info.get('url')),
            'uploader': u'arte.tv',
            'upload_date': info.get('date'),
            'title': info.get('title'),
            'ext': u'mp4',
            'format': u'NA',
            'player_url': None,
        }
    def _real_extract(self, url):
        video_id = url.split('/')[-1]
        self.report_extraction(video_id)

        if re.search(self._LIVE_URL, video_id) is not None:
            self.extractLiveStream(url)
            return
        else:
            info = self.extractPlus7Stream(url)

        return [info]


class GenericIE(InfoExtractor):
    """Generic last-resort information extractor."""

    _VALID_URL = r'.*'
    IE_NAME = u'generic'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
        self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)

    def report_following_redirect(self, new_url):
        """Report redirect following."""
        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

    def _test_redirect(self, url):
        """Check whether the URL is a redirect (e.g. a URL shortener); if so,
        restart the extraction chain with the new URL."""
        class HeadRequest(compat_urllib_request.Request):
            def get_method(self):
                return "HEAD"

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HeadRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HeadRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())

        response = opener.open(HeadRequest(url))
        new_url = response.geturl()

        if url == new_url:
            return False

        self.report_following_redirect(new_url)
        self._downloader.download([new_url])
        return True
    def _real_extract(self, url):
        if self._test_redirect(url):
            return

        video_id = url.split('/')[-1]
        request = compat_urllib_request.Request(url)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return
        except ValueError as err:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        self.report_extraction(video_id)
        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        video_url = compat_urllib_parse.unquote(mobj.group(1))
        video_id = os.path.basename(video_url)

        # here's a fun little line of code for you:
        video_extension = os.path.splitext(video_id)[1][1:]
        video_id = os.path.splitext(video_id)[0]
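        # (os.path.splitext() yields ('stem', '.ext'); [1][1:] keeps the
        # extension without its leading dot, and the stem becomes the id)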
        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
        #   Site Name | Video Title
        #   Video Title - Tagline | Site Name
        # and so on and so forth; it's just not practical
        mobj = re.search(r'<title>(.*)</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')

        # video uploader is domain name
        mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract uploader (domain name)')
            return
        video_uploader = mobj.group(1).decode('utf-8')

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]


class YoutubeSearchIE(InfoExtractor):
    """Information Extractor for YouTube search queries."""

    _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _max_youtube_results = 1000
    IE_NAME = u'youtube:search'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_page(self, query, pagenum):
        """Report attempt to download search page with given number."""
        query = query.decode(preferredencoding())
        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

    def _real_extract(self, query):
        mobj = re.match(self._VALID_URL, query)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
            return

        prefix, query = query.split(':')
        prefix = prefix[8:]
        query = query.encode('utf-8')
        if prefix == '':
            self._download_n_results(query, 1)
            return
        elif prefix == 'all':
            self._download_n_results(query, self._max_youtube_results)
            return
        else:
            try:
                n = int(prefix)
                if n <= 0:
                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                    return
                elif n > self._max_youtube_results:
                    self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
                    n = self._max_youtube_results
                self._download_n_results(query, n)
                return
            except ValueError:  # parsing prefix as integer fails
                self._download_n_results(query, 1)
                return

    def _download_n_results(self, query, n):
        """Downloads a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n
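        # The GData API serves at most 50 results per page (max-results=50),
        # and start-index is 1-based, so page pagenum begins at 50*pagenum + 1.
        # limit is tightened to totalItems once the first response arrives.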
        while (50 * pagenum) < limit:
            self.report_download_page(query, pagenum+1)
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
            request = compat_urllib_request.Request(result_url)
            try:
                data = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
                return
            api_response = json.loads(data)['data']

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        for id in video_ids:
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
        return
class GoogleSearchIE(InfoExtractor): class GoogleSearchIE(InfoExtractor):
"""Information Extractor for Google Video search queries.""" """Information Extractor for Google Video search queries."""
_VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+' _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en' _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
_VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)' _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
_MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"' _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
_max_google_results = 1000 _max_google_results = 1000
IE_NAME = u'video.google:search' IE_NAME = u'video.google:search'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_page(self, query, pagenum):
        """Report attempt to download playlist page with given number."""
        query = query.decode(preferredencoding())
        self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))

    def _real_extract(self, query):
        mobj = re.match(self._VALID_URL, query)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
            return

        prefix, query = query.split(':')
        prefix = prefix[8:]
        query = query.encode('utf-8')
        if prefix == '':
            self._download_n_results(query, 1)
            return
        elif prefix == 'all':
            self._download_n_results(query, self._max_google_results)
            return
        else:
            try:
                n = int(prefix)
                if n <= 0:
                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                    return
                elif n > self._max_google_results:
                    self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
                    n = self._max_google_results
                self._download_n_results(query, n)
                return
            except ValueError: # parsing prefix as integer fails
                self._download_n_results(query, 1)
                return

    def _download_n_results(self, query, n):
        """Downloads a specified number of results for a query"""

        video_ids = []
        pagenum = 0

        while True:
            self.report_download_page(query, pagenum)
            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10)
            request = compat_urllib_request.Request(result_url)
            try:
                page = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            # Extract video identifiers
            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                video_id = mobj.group(1)
                if video_id not in video_ids:
                    video_ids.append(video_id)
                    if len(video_ids) == n:
                        # Specified n videos reached
                        for id in video_ids:
                            self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
                        return

            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
                for id in video_ids:
                    self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
                return

            pagenum += 1

class YahooSearchIE(InfoExtractor):
    """Information Extractor for Yahoo! Video search queries."""
    _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
    _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
    _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
    _MORE_PAGES_INDICATOR = r'\s*Next'
    _max_yahoo_results = 1000
    IE_NAME = u'video.yahoo:search'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_page(self, query, pagenum):
        """Report attempt to download playlist page with given number."""
        query = query.decode(preferredencoding())
        self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))

    def _real_extract(self, query):
        mobj = re.match(self._VALID_URL, query)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
            return

        prefix, query = query.split(':')
        prefix = prefix[8:]
        query = query.encode('utf-8')
        if prefix == '':
            self._download_n_results(query, 1)
            return
        elif prefix == 'all':
            self._download_n_results(query, self._max_yahoo_results)
            return
        else:
            try:
                n = int(prefix)
                if n <= 0:
                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                    return
                elif n > self._max_yahoo_results:
                    self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
                    n = self._max_yahoo_results
                self._download_n_results(query, n)
                return
            except ValueError: # parsing prefix as integer fails
                self._download_n_results(query, 1)
                return

    def _download_n_results(self, query, n):
        """Downloads a specified number of results for a query"""

        video_ids = []
        already_seen = set()
        pagenum = 1

        while True:
            self.report_download_page(query, pagenum)
            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
            request = compat_urllib_request.Request(result_url)
            try:
                page = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            # Extract video identifiers
            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                video_id = mobj.group(1)
                if video_id not in already_seen:
                    video_ids.append(video_id)
                    already_seen.add(video_id)
                    if len(video_ids) == n:
                        # Specified n videos reached
                        for id in video_ids:
                            self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
                        return

            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
                for id in video_ids:
                    self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
                return

            pagenum += 1

class YoutubePlaylistIE(InfoExtractor):
    """Information Extractor for YouTube playlists."""
    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
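    # Capture groups: (1) the list-type parameter ('p', 'a' or 'list'),
    # (2) the playlist/artist id, (3) a trailing video id when the URL
    # points at a single video inside a list (handled in _real_extract).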
    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s'
    _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
    IE_NAME = u'youtube:playlist'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_page(self, playlist_id, pagenum):
        """Report attempt to download playlist page with given number."""
        self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
            return

        # Single video case
        if mobj.group(3) is not None:
            self._downloader.download([mobj.group(3)])
            return

        # Download playlist pages
        # prefix is 'p' as default for playlists but there are other types that need extra care
        playlist_prefix = mobj.group(1)
        if playlist_prefix == 'a':
            playlist_access = 'artist'
        else:
            playlist_prefix = 'p'
            playlist_access = 'view_play_list'
        playlist_id = mobj.group(2)
        video_ids = []
        pagenum = 1

        while True:
            self.report_download_page(playlist_id, pagenum)
            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
            request = compat_urllib_request.Request(url)
            try:
                page = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            # Extract video identifiers
            ids_in_page = []
            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))
            video_ids.extend(ids_in_page)

            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
                break
            pagenum += 1
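
        # 'playliststart' is 1-based (converted to a 0-based index here);
        # a 'playlistend' of -1 means "to the end". Note that the end value
        # is used as an absolute slice index, not as a count from the start.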
        playliststart = self._downloader.params.get('playliststart', 1) - 1
        playlistend = self._downloader.params.get('playlistend', -1)
        if playlistend == -1:
            video_ids = video_ids[playliststart:]
        else:
            video_ids = video_ids[playliststart:playlistend]

        for id in video_ids:
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
        return

class YoutubeChannelIE(InfoExtractor):
    """Information Extractor for YouTube channels."""
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
    _MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO
    IE_NAME = u'youtube:channel'

    def report_download_page(self, channel_id, pagenum):
        """Report attempt to download channel page with given number."""
        self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
            return

        # Download channel pages
        channel_id = mobj.group(1)
        video_ids = []
        pagenum = 1

        while True:
            self.report_download_page(channel_id, pagenum)
            url = self._TEMPLATE_URL % (channel_id, pagenum)
            request = compat_urllib_request.Request(url)
            try:
                page = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            # Extract video identifiers
            ids_in_page = []
            for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))
            video_ids.extend(ids_in_page)

            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
                break
            pagenum += 1

        for id in video_ids:
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
        return

class YoutubeUserIE(InfoExtractor):
    """Information Extractor for YouTube users."""
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
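    # e.g. the first request for a (hypothetical) user "foo" becomes
    # http://gdata.youtube.com/feeds/api/users/foo/uploads?max-results=50&start-index=1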
    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
    IE_NAME = u'youtube:user'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_page(self, username, start_index):
        """Report attempt to download user page."""
        self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
                (username, start_index, start_index + self._GDATA_PAGE_SIZE))

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
            return

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 0

        while True:
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1
            self.report_download_page(username, start_index)

            request = compat_urllib_request.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
            try:
                page = compat_urllib_request.urlopen(request).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            # Extract video identifiers
            ids_in_page = []
            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))
            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", i.e. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.
            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
                break

            pagenum += 1

        all_ids_count = len(video_ids)
        playliststart = self._downloader.params.get('playliststart', 1) - 1
        playlistend = self._downloader.params.get('playlistend', -1)

        if playlistend == -1:
            video_ids = video_ids[playliststart:]
        else:
            video_ids = video_ids[playliststart:playlistend]

        self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
                (username, all_ids_count, len(video_ids)))

        for video_id in video_ids:
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])

class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users."""
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_page(self, username, pagenum):
        """Report attempt to download user page."""
        self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
                (self.IE_NAME, username, pagenum))

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
            return

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
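        # The %s placeholder is filled with the numeric users_id scraped
        # from the profile page below, not with the username itself.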
        request = compat_urllib_request.Request(url)
        try:
            page = compat_urllib_request.urlopen(request).read().decode('utf-8')
            mobj = re.search(r'data-users-id="([^"]+)"', page)
            page_base = page_base % mobj.group(1)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
            return

        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 1

        while True:
            self.report_download_page(username, pagenum)

            request = compat_urllib_request.Request(page_base + "&page=" + str(pagenum))
            try:
                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            # Extract video identifiers
            ids_in_page = []
            for mobj in re.finditer(r'href="/([^"]+)"', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))
            video_ids.extend(ids_in_page)

            # A little optimization - if current page is not
            # "full", i.e. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.
            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        all_ids_count = len(video_ids)
        playliststart = self._downloader.params.get('playliststart', 1) - 1
        playlistend = self._downloader.params.get('playlistend', -1)

        if playlistend == -1:
            video_ids = video_ids[playliststart:]
        else:
            video_ids = video_ids[playliststart:playlistend]

        self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
                (self.IE_NAME, username, all_ids_count, len(video_ids)))

        for video_id in video_ids:
            self._downloader.download([u'http://blip.tv/'+video_id])

class DepositFilesIE(InfoExtractor):
    """Information extractor for depositfiles.com"""
    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
    IE_NAME = u'DepositFiles'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, file_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)

    def _real_extract(self, url):
        file_id = url.split('/')[-1]
        # Rebuild url in english locale
        url = 'http://depositfiles.com/en/files/' + file_id

        # Retrieve file webpage with 'Free download' button pressed
        free_download_indication = { 'gateway_result' : '1' }
        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
        try:
            self.report_download_webpage(file_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
            return

        # Search for the real file URL
        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
        if (mobj is None) or (mobj.group(1) is None):
            # Try to figure out reason of the error.
            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
            if (mobj is not None) and (mobj.group(1) is not None):
                restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
                self._downloader.trouble(u'ERROR: %s' % restriction_message)
            else:
                self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
            return

        file_url = mobj.group(1)
        file_extension = os.path.splitext(file_url)[1][1:]

        # Search for file title
        mobj = re.search(r'<b title="(.*?)">', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        file_title = mobj.group(1).decode('utf-8')

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': None,
            'upload_date': None,
            'title': file_title,
            'ext': file_extension.decode('utf-8'),
        }]

class FacebookIE(InfoExtractor):
    """Information Extractor for Facebook"""
    _WORKING = False
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
    _NETRC_MACHINE = 'facebook'
    _available_formats = ['video', 'highqual', 'lowqual']
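    # Ordered best to worst; the format selection in _real_extract relies
    # on this ordering (index 0 is treated as "best quality").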
    _video_extensions = {
        'video': 'mp4',
        'highqual': 'mp4',
        'lowqual': 'mp4',
    }
    IE_NAME = u'facebook'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def _reporter(self, message):
        """Add header and report message."""
        self._downloader.to_screen(u'[facebook] %s' % message)

    def report_login(self):
        """Report attempt to log in."""
        self._reporter(u'Logging in')

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self._reporter(u'%s: Downloading video webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self._reporter(u'%s: Extracting video information' % video_id)

    def _parse_page(self, video_webpage):
        """Extract video information from page"""
        # General data
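        # (each pattern captures the value from an embedded Javascript
        # call of the form ("key", "value") in the page source)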
        data = {'title': r'\("video_title", "(.*?)"\)',
                'description': r'<div class="datawrap">(.*?)</div>',
                'owner': r'\("video_owner_name", "(.*?)"\)',
                'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
                }
        video_info = {}
        for piece in data.keys():
            mobj = re.search(data[piece], video_webpage)
            if mobj is not None:
                video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))

        # Video urls
        video_urls = {}
        for fmt in self._available_formats:
            mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
            if mobj is not None:
                # URL is in a Javascript segment inside an escaped Unicode format within
                # the generally utf-8 page
                video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
        video_info['video_urls'] = video_urls

        return video_info

    def _real_initialize(self):
        if self._downloader is None:
            return

        useremail = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            useremail = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    useremail = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
                return

        if useremail is None:
            return

        # Log in
        login_form = {
            'email': useremail,
            'pass': password,
            'login': 'Log+In'
            }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read()
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
            return

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group('ID')

        # Get video webpage
        self.report_video_webpage_download(video_id)
        request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
        try:
            page = compat_urllib_request.urlopen(request)
            video_webpage = page.read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        # Start extracting information
        self.report_information_extraction(video_id)

        # Extract information
        video_info = self._parse_page(video_webpage)

        # uploader
        if 'owner' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = video_info['owner']

        # title
        if 'title' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = video_info['title']
        video_title = video_title.decode('utf-8')

        # thumbnail image
        if 'thumbnail' not in video_info:
            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
            video_thumbnail = ''
        else:
            video_thumbnail = video_info['thumbnail']

        # upload date
        upload_date = None
        if 'upload_date' in video_info:
            upload_time = video_info['upload_date']
            timetuple = email.utils.parsedate_tz(upload_time)
            if timetuple is not None:
                try:
                    upload_date = time.strftime('%Y%m%d', timetuple[0:9])
                except Exception:
                    pass

        # description
        video_description = video_info.get('description', 'No description available.')

        url_map = video_info['video_urls']
        if len(url_map.keys()) > 0:
            # Decide which formats to download
            req_format = self._downloader.params.get('format', None)
            format_limit = self._downloader.params.get('format_limit', None)

            if format_limit is not None and format_limit in self._available_formats:
                format_list = self._available_formats[self._available_formats.index(format_limit):]
            else:
                format_list = self._available_formats
            existing_formats = [x for x in format_list if x in url_map]
            if len(existing_formats) == 0:
                self._downloader.trouble(u'ERROR: no known formats available for video')
                return
            if req_format is None:
                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
            elif req_format == 'worst':
                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
            elif req_format == '-1':
                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
            else:
                # Specific format
                if req_format not in url_map:
                    self._downloader.trouble(u'ERROR: requested format not available')
                    return
                video_url_list = [(req_format, url_map[req_format])] # Specific format

        results = []
        for format_param, video_real_url in video_url_list:
            # Extension
            video_extension = self._video_extensions.get(format_param, 'mp4')

            results.append({
                'id': video_id.decode('utf-8'),
                'url': video_real_url.decode('utf-8'),
                'uploader': video_uploader.decode('utf-8'),
                'upload_date': upload_date,
                'title': video_title,
                'ext': video_extension.decode('utf-8'),
                'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
                'thumbnail': video_thumbnail.decode('utf-8'),
                'description': video_description.decode('utf-8'),
            })
        return results

class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv"""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))

    def report_direct_download(self, title):
        """Report direct download."""
        self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url.encode('utf-8'))
        self.report_extraction(mobj.group(1))
        info = None
        try:
            urlh = compat_urllib_request.urlopen(request)
            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
                basename = url.split('/')[-1]
                title, ext = os.path.splitext(basename)
                title = title.decode('UTF-8')
                ext = ext.replace('.', '')
                self.report_direct_download(title)
                info = {
                    'id': title,
                    'url': url,
                    'uploader': None,
                    'upload_date': None,
                    'title': title,
                    'ext': ext,
                    'urlhandle': urlh
                }
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
            return
        if info is None: # Regular URL
            try:
                json_code = urlh.read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
                return

            try:
                json_data = json.loads(json_code)
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data

                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)

                info = {
                    'id': data['item_id'],
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl']
                }
            except (ValueError, KeyError) as err:
                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
                return
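
        # Presumably this works around blip.tv serving different (direct)
        # media URLs to the iTunes client; the original code sets the
        # header without explanation, so treat the reason as an assumption.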
        std_headers['User-Agent'] = 'iTunes/10.6.1'
        return [info]

class MyVideoIE(InfoExtractor):
    """Information Extractor for myvideo.de."""
    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
    IE_NAME = u'myvideo'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group(1)

        # Get video webpage
        request = compat_urllib_request.Request('http://www.myvideo.de/watch/%s' % video_id)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        self.report_extraction(video_id)
        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
                webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        video_url = mobj.group(1) + ('/%s.flv' % video_id)

        mobj = re.search('<title>([^<]+)</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1)

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': u'flv',
        }]

class ComedyCentralIE(InfoExtractor):
    """Information extractor for The Daily Show and Colbert Report"""
    _VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
    IE_NAME = u'comedycentral'

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']

    _video_extensions = {
        '3500': 'mp4',
        '2200': 'mp4',
        '1700': 'mp4',
        '1200': 'mp4',
        '750': 'mp4',
        '400': 'mp4',
    }
    _video_dimensions = {
        '3500': '1280x720',
        '2200': '960x540',
        '1700': '768x432',
        '1200': '640x360',
        '750': '512x288',
        '400': '384x216',
    }
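    # The numeric format ids look like bitrates (kbit/s); _video_dimensions
    # only maps them to frame sizes for the listing printed by _print_formats.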
    def report_extraction(self, episode_id):
        self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)

    def report_config_download(self, episode_id):
        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)

    def report_index_download(self, episode_id):
        self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)

    def report_player_url(self, episode_id):
        self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)

    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
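    # With the tables above, each line printed by _print_formats comes out as,
    # e.g.: "3500\t:\tmp4\t[1280x720]" (tab characters shown escaped here).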
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        if mobj.group('shortname'):
            if mobj.group('shortname') in ('tds', 'thedailyshow'):
                url = u'http://www.thedailyshow.com/full-episodes/'
            else:
                url = u'http://www.colbertnation.com/full-episodes/'
            mobj = re.match(self._VALID_URL, url)
            assert mobj is not None

        dlNewest = not mobj.group('episode')
        if dlNewest:
            epTitle = mobj.group('showname')
        else:
            epTitle = mobj.group('episode')

        req = compat_urllib_request.Request(url)
        self.report_extraction(epTitle)
        try:
            htmlHandle = compat_urllib_request.urlopen(req)
            html = htmlHandle.read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
            return
        if dlNewest:
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url)
            if mobj is None:
                self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
                return
            if mobj.group('episode') == '':
                self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
                return
            epTitle = mobj.group('episode')
        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"', html)

        if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix; so extract the alternate
            # reference and then add the URL prefix manually.
            altMovieParams = re.findall('data-mgid="([^"]*episode.*?:.*?)"', html)
            if len(altMovieParams) == 0:
                self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
                return
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
        playerUrl_raw = mMovieParams[0][0]
        self.report_player_url(epTitle)
        try:
            urlHandle = compat_urllib_request.urlopen(playerUrl_raw)
            playerUrl = urlHandle.geturl()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
            return

        uri = mMovieParams[0][1]
        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
        self.report_index_download(epTitle)
        try:
            indexXml = compat_urllib_request.urlopen(indexUrl).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
            return
        results = []

        idoc = xml.etree.ElementTree.fromstring(indexXml)
        itemEls = idoc.findall('.//item')
        for itemEl in itemEls:
            mediaId = itemEl.findall('./guid')[0].text
            shortMediaId = mediaId.split(':')[-1]
            showId = mediaId.split(':')[-2].replace('.com', '')
            officialTitle = itemEl.findall('./title')[0].text
            officialDate = itemEl.findall('./pubDate')[0].text

            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
                         compat_urllib_parse.urlencode({'uri': mediaId}))
            configReq = compat_urllib_request.Request(configUrl)
            self.report_config_download(epTitle)
            try:
                configXml = compat_urllib_request.urlopen(configReq).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                return

            cdoc = xml.etree.ElementTree.fromstring(configXml)
            turls = []
            for rendition in cdoc.findall('.//rendition'):
                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
                turls.append(finfo)

            if len(turls) == 0:
                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
                continue

            if self._downloader.params.get('listformats', None):
                self._print_formats([i[0] for i in turls])
                return

            # For now, just pick the highest bitrate
            format, video_url = turls[-1]

            # Get the format arg from the arg stream
            req_format = self._downloader.params.get('format', None)

            # Select format if we can find one
            for f, v in turls:
                if f == req_format:
                    format, video_url = f, v
                    break

            # Patch to download from alternative CDN, which does not
            # break on current RTMPDump builds
            broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
            better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"

            if video_url.startswith(broken_cdn):
                video_url = video_url.replace(broken_cdn, better_cdn)
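            # Sketch of the rewrite (the path segment <p> is illustrative only):
            #   rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/<p>
            #   becomes
            #   rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/<p>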
            effTitle = showId + u'-' + epTitle
            info = {
                'id': shortMediaId,
                'url': video_url,
                'uploader': showId,
                'upload_date': officialDate,
                'title': effTitle,
                'ext': 'mp4',
                'format': format,
                'thumbnail': None,
                'description': officialTitle,
                'player_url': None,  # playerUrl
            }

            results.append(info)

        return results


class EscapistIE(InfoExtractor):
    """Information extractor for The Escapist """

    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
    IE_NAME = u'escapist'
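    # Matches e.g. http://www.escapistmagazine.com/videos/view/<showname>/<episode>
    # (a sketch of the shape _VALID_URL accepts, not a real episode URL).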
    def report_extraction(self, showName):
        self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)

    def report_config_download(self, showName):
        self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        showName = mobj.group('showname')
        videoId = mobj.group('episode')

        self.report_extraction(showName)
        try:
            webPage = compat_urllib_request.urlopen(url)
            webPageBytes = webPage.read()
            m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
            webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
            return

        descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
        description = unescapeHTML(descMatch.group(1))

        imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
        imgUrl = unescapeHTML(imgMatch.group(1))

        playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
        playerUrl = unescapeHTML(playerUrlMatch.group(1))

        configUrlMatch = re.search('config=(.*)$', playerUrl)
        configUrl = compat_urllib_parse.unquote(configUrlMatch.group(1))

        self.report_config_download(showName)
        try:
            configJSON = compat_urllib_request.urlopen(configUrl).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
            return

        # Technically, it's JavaScript, not JSON
        configJSON = configJSON.replace("'", '"')

        try:
            config = json.loads(configJSON)
        except (ValueError,) as err:
            self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
            return

        playlist = config['playlist']
        videoUrl = playlist[1]['url']

        info = {
            'id': videoId,
            'url': videoUrl,
            'uploader': showName,
            'upload_date': None,
            'title': showName,
            'ext': 'flv',
            'thumbnail': imgUrl,
            'description': description,
            'player_url': playerUrl,
        }

        return [info]


class CollegeHumorIE(InfoExtractor):
    """Information extractor for collegehumor.com"""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
    IE_NAME = u'collegehumor'
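    # Matches e.g. http://www.collegehumor.com/video/<videoid>/<shorttitle>,
    # where <videoid> is the numeric public id (sketch of the URL shape only).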
    def report_webpage(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group('videoid')

        self.report_webpage(video_id)
        request = compat_urllib_request.Request(url)
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
        if m is None:
            self._downloader.trouble(u'ERROR: Cannot extract internal video ID')
            return
        internal_video_id = m.group('internalvideoid')

        info = {
            'id': video_id,
            'internal_id': internal_video_id,
            'uploader': None,
            'upload_date': None,
        }

        self.report_extraction(video_id)
        xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
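        # The moogaloop endpoint is expected to return XML shaped roughly like
        # (a sketch inferred from the element lookups below, not documented):
        #   <video><description/><caption/><file/><thumbnail/></video>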
        try:
            metaXml = compat_urllib_request.urlopen(xmlUrl).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
            return

        mdoc = xml.etree.ElementTree.fromstring(metaXml)
        try:
            videoNode = mdoc.findall('./video')[0]
            info['description'] = videoNode.findall('./description')[0].text
            info['title'] = videoNode.findall('./caption')[0].text
            info['url'] = videoNode.findall('./file')[0].text
            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
            info['ext'] = info['url'].rpartition('.')[2]
        except IndexError:
            self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
            return

        return [info]


class XVideosIE(InfoExtractor):
    """Information extractor for xvideos.com"""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
    IE_NAME = u'xvideos'

    def report_webpage(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(1).decode('utf-8')

        self.report_webpage(video_id)

        request = compat_urllib_request.Request(r'http://www.xvideos.com/video' + video_id)
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        self.report_extraction(video_id)

        # Extract video URL
        mobj = re.search(r'flv_url=(.+?)&', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video url')
            return
        video_url = compat_urllib_parse.unquote(mobj.group(1).decode('utf-8'))

        # Extract title
        mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = mobj.group(1).decode('utf-8')

        # Extract video thumbnail
        mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
            return
        video_thumbnail = mobj.group(0).decode('utf-8')

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': 'flv',
            'thumbnail': video_thumbnail,
            'description': None,
        }

        return [info]


class SoundcloudIE(InfoExtractor):
    """Information extractor for soundcloud.com

    To access the media, the uid of the song and a stream token must be
    extracted from the page source, and the script must make a request
    to media.soundcloud.com/crossdomain.xml. Then the media can be
    grabbed by requesting from a URL composed of the stream token and uid.
    """

    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
    IE_NAME = u'soundcloud'
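    # The resulting media URL has the shape (see the mediaURL template in
    # _real_extract below):
    #   http://media.soundcloud.com/stream/<uid>?stream_token=<token>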
    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_webpage(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        # extract uploader (which is in the url)
        uploader = mobj.group(1).decode('utf-8')
        # extract simple title (uploader + slug of song title)
        slug_title = mobj.group(2).decode('utf-8')
        simple_title = uploader + u'-' + slug_title

        self.report_webpage('%s/%s' % (uploader, slug_title))

        request = compat_urllib_request.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        self.report_extraction('%s/%s' % (uploader, slug_title))

        # extract uid and stream token that soundcloud hands out for access
        mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
        if mobj:
            video_id = mobj.group(1)
            stream_token = mobj.group(2)

        # extract unsimplified title
        mobj = re.search('"title":"(.*?)",', webpage)
        if mobj:
            title = mobj.group(1).decode('utf-8')
        else:
            title = simple_title

        # construct media url (with uid/token)
        mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
        mediaURL = mediaURL % (video_id, stream_token)

        # description
        description = u'No description available'
        mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
        if mobj:
            description = mobj.group(1)

        # upload date
        upload_date = None
        mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
        if mobj:
            try:
                upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
            except Exception as err:
                self._downloader.to_stderr(compat_str(err))

        # for soundcloud, a request to a cross domain is required for cookies
        request = compat_urllib_request.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)

        return [{
            'id': video_id.decode('utf-8'),
            'url': mediaURL,
            'uploader': uploader.decode('utf-8'),
            'upload_date': upload_date,
            'title': title,
            'ext': u'mp3',
            'description': description.decode('utf-8'),
        }]


class InfoQIE(InfoExtractor):
    """Information extractor for infoq.com"""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
    IE_NAME = u'infoq'
    def report_webpage(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        self.report_webpage(url)

        request = compat_urllib_request.Request(url)
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        self.report_extraction(url)

        # Extract video URL
        mobj = re.search(r"jsclassref='([^']*)'", webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video url')
            return
        video_url = 'rtmpe://video.infoq.com/cfx/st/' + compat_urllib_parse.unquote(mobj.group(1).decode('base64'))
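        # jsclassref carries a base64-encoded, URL-quoted media path; after
        # decoding, the stream URL looks like (a sketch, path illustrative):
        #   rtmpe://video.infoq.com/cfx/st/<decoded-path>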
        # Extract title
        mobj = re.search(r'contentTitle = "(.*?)";', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = mobj.group(1).decode('utf-8')

        # Extract description
        video_description = u'No description available.'
        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
        if mobj is not None:
            video_description = mobj.group(1).decode('utf-8')

        video_filename = video_url.split('/')[-1]
        video_id, extension = video_filename.split('.')

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': extension,  # Extension is always(?) mp4, but seems to be flv
            'thumbnail': None,
            'description': video_description,
        }

        return [info]


class MixcloudIE(InfoExtractor):
    """Information extractor for www.mixcloud.com"""

    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
    IE_NAME = u'mixcloud'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_json(self, file_id):
        """Report JSON download."""
        self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME)

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
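    # The cloudcast JSON's 'audio_formats' section is assumed to look either
    # like {'mp3': {'128': [url, ...], ...}} (per-bitrate url lists) or like
    # {'mp3': [url, ...]} (no bitrate info); hence the TypeError fallback in
    # get_urls below. This shape is inferred from the code, not from any
    # published API documentation.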
    def get_urls(self, jsonData, fmt, bitrate='best'):
        """Get urls from 'audio_formats' section in json"""
        file_url = None
        try:
            bitrate_list = jsonData[fmt]
            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
                bitrate = max(bitrate_list)  # select highest

            url_list = jsonData[fmt][bitrate]
        except TypeError:  # we have no bitrate info.
            url_list = jsonData[fmt]
        return url_list

    def check_urls(self, url_list):
        """Returns 1st active url from list"""
        for url in url_list:
            try:
                compat_urllib_request.urlopen(url)
                return url
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                url = None

        return None

    def _print_formats(self, formats):
        print('Available formats:')
        for fmt in formats.keys():
            for b in formats[fmt]:
                try:
                    ext = formats[fmt][b][0]
                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
                except TypeError:  # we have no bitrate info
                    ext = formats[fmt][0]
                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
                    break
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        # extract uploader & filename from url
        uploader = mobj.group(1).decode('utf-8')
        file_id = uploader + "-" + mobj.group(2).decode('utf-8')

        # construct API request
        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
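        # e.g. http://www.mixcloud.com/<uploader>/<cloudcast>/ maps to
        #      http://www.mixcloud.com/api/1/cloudcast/<uploader>/<cloudcast>.json
        # (the slicing assumes the input URL ends with a trailing slash)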
        # retrieve .json file with links to files
        request = compat_urllib_request.Request(file_url)
        try:
            self.report_download_json(file_url)
            jsonData = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
            return

        # parse JSON
        json_data = json.loads(jsonData)
        player_url = json_data['player_swf_url']
        formats = dict(json_data['audio_formats'])

        req_format = self._downloader.params.get('format', None)
        bitrate = None

        if self._downloader.params.get('listformats', None):
            self._print_formats(formats)
            return

        if req_format is None or req_format == 'best':
            for format_param in formats.keys():
                url_list = self.get_urls(formats, format_param)
                # check urls
                file_url = self.check_urls(url_list)
                if file_url is not None:
                    break  # got it!
        else:
            if req_format not in formats.keys():
                self._downloader.trouble(u'ERROR: format is not available')
                return

            url_list = self.get_urls(formats, req_format)
            file_url = self.check_urls(url_list)
            format_param = req_format

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': uploader.decode('utf-8'),
            'upload_date': None,
            'title': json_data['name'],
            'ext': file_url.split('.')[-1].decode('utf-8'),
            'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
            'thumbnail': json_data['thumbnail_url'],
            'description': json_data['description'],
            'player_url': player_url.decode('utf-8'),
        }]


class StanfordOpenClassroomIE(InfoExtractor):
    """Information extractor for Stanford's Open ClassRoom"""

    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
    IE_NAME = u'stanfordoc'
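    # Matches the root page, course pages and individual videos, e.g.
    # (shapes derived from _VALID_URL, placeholders in angle brackets):
    #   http://openclassroom.stanford.edu/
    #   http://openclassroom.stanford.edu/MainFolder/CoursePage.php?course=<course>
    #   http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=<course>&video=<video>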
    def report_download_webpage(self, objid):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        if mobj.group('course') and mobj.group('video'):  # A specific video
            course = mobj.group('course')
            video = mobj.group('video')
            info = {
                'id': course + '_' + video,
                'uploader': None,
                'upload_date': None,
            }

            self.report_extraction(info['id'])
            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
            xmlUrl = baseUrl + video + '.xml'
            try:
                metaXml = compat_urllib_request.urlopen(xmlUrl).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
                return
            mdoc = xml.etree.ElementTree.fromstring(metaXml)
            try:
                info['title'] = mdoc.findall('./title')[0].text
                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
            except IndexError:
                self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
                return
            info['ext'] = info['url'].rpartition('.')[2]
            return [info]
        elif mobj.group('course'):  # A course page
            course = mobj.group('course')
            info = {
                'id': course,
                'type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            self.report_download_webpage(info['id'])
            try:
                coursepage = compat_urllib_request.urlopen(url).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
                return

            m = re.search('<h1>([^<]+)</h1>', coursepage)
            if m:
                info['title'] = unescapeHTML(m.group(1))
            else:
                info['title'] = info['id']

            m = re.search('<description>([^<]+)</description>', coursepage)
            if m:
                info['description'] = unescapeHTML(m.group(1))

            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
            info['list'] = [
                {
                    'type': 'reference',
                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
                }
                for vpage in links]

            results = []
            for entry in info['list']:
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results
        else:  # Root page
            info = {
                'id': 'Stanford OpenClassroom',
                'type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            self.report_download_webpage(info['id'])
            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
            try:
                rootpage = compat_urllib_request.urlopen(rootURL).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
                return

            info['title'] = info['id']

            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
            info['list'] = [
                {
                    'type': 'reference',
                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
                }
                for cpage in links]

            results = []
            for entry in info['list']:
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results


class MTVIE(InfoExtractor):
    """Information extractor for MTV.com"""

    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
    IE_NAME = u'mtv'
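    # Matches e.g. http://www.mtv.com/videos/<artist>/<videoid>/<title>,
    # where <videoid> is numeric (a sketch of the accepted URL shape).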
    def report_webpage(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        if not mobj.group('proto'):
            url = 'http://' + url
        video_id = mobj.group('videoid')
        self.report_webpage(video_id)

        request = compat_urllib_request.Request(url)
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract song name')
            return
        song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
        mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract performer')
            return
        performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
        video_title = performer + ' - ' + song_name

        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract mtvn_uri')
            return
        mtvn_uri = mobj.group(1)

        mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract content id')
            return
        content_id = mobj.group(1)

        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
        self.report_extraction(video_id)
        request = compat_urllib_request.Request(videogen_url)
        try:
            metadataXml = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
            return

        mdoc = xml.etree.ElementTree.fromstring(metadataXml)
        renditions = mdoc.findall('.//rendition')

        # For now, always pick the highest quality.
        rendition = renditions[-1]

        try:
            _, _, ext = rendition.attrib['type'].partition('/')
            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
            video_url = rendition.find('./src').text
        except KeyError:
            self._downloader.trouble('Invalid rendition field.')
            return

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': performer,
            'upload_date': None,
            'title': video_title,
            'ext': ext,
            'format': format,
        }

        return [info]


class YoukuIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
    IE_NAME = u'Youku'
    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, file_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
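    # _gen_sid builds a pseudo session id: the current epoch milliseconds with
    # two random integers appended, e.g. roughly "1354060800000" + "1500" +
    # "5000" concatenated (an illustrative sketch, not a real sid).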
    def _gen_sid(self):
        nowTime = int(time.time() * 1000)
        random1 = random.randint(1000, 1998)
        random2 = random.randint(1000, 9999)

        return "%d%d%d" % (nowTime, random1, random2)
    def _get_file_ID_mix_string(self, seed):
        mixed = []
        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
        seed = float(seed)
        for i in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            index = math.floor(seed / 65536 * len(source))
            mixed.append(source[int(index)])
            source.remove(source[int(index)])
        #return ''.join(mixed)
        return mixed

    def _get_file_id(self, fileId, seed):
        mixed = self._get_file_ID_mix_string(seed)
        ids = fileId.split('*')
        realId = []
        for ch in ids:
            if ch:
                realId.append(mixed[int(ch)])
        return ''.join(realId)
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group('ID')

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id

        request = compat_urllib_request.Request(info_url, None, std_headers)
        try:
            self.report_download_webpage(video_id)
            jsondata = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        self.report_extraction(video_id)
try: try:
config = json.loads(jsondata) config = json.loads(jsondata)
video_title = config['data'][0]['title'] video_title = config['data'][0]['title']
seed = config['data'][0]['seed'] seed = config['data'][0]['seed']
format = self._downloader.params.get('format', None) format = self._downloader.params.get('format', None)
supported_format = config['data'][0]['streamfileids'].keys() supported_format = config['data'][0]['streamfileids'].keys()
if format is None or format == 'best': if format is None or format == 'best':
if 'hd2' in supported_format: if 'hd2' in supported_format:
format = 'hd2' format = 'hd2'
else: else:
format = 'flv' format = 'flv'
ext = u'flv' ext = u'flv'
elif format == 'worst': elif format == 'worst':
format = 'mp4' format = 'mp4'
ext = u'mp4' ext = u'mp4'
else: else:
format = 'flv' format = 'flv'
ext = u'flv' ext = u'flv'
fileid = config['data'][0]['streamfileids'][format] fileid = config['data'][0]['streamfileids'][format]
seg_number = len(config['data'][0]['segs'][format]) seg_number = len(config['data'][0]['segs'][format])
keys=[] keys=[]
for i in xrange(seg_number): for i in xrange(seg_number):
keys.append(config['data'][0]['segs'][format][i]['k']) keys.append(config['data'][0]['segs'][format][i]['k'])
#TODO check error #TODO check error
#youku only could be viewed from mainland china #youku only could be viewed from mainland china
except: except:
self._downloader.trouble(u'ERROR: unable to extract info section') self._downloader.trouble(u'ERROR: unable to extract info section')
return return
files_info=[] files_info=[]
sid = self._gen_sid() sid = self._gen_sid()
fileid = self._get_file_id(fileid, seed) fileid = self._get_file_id(fileid, seed)
#column 8,9 of fileid represent the segment number #column 8,9 of fileid represent the segment number
#fileid[7:9] should be changed #fileid[7:9] should be changed
for index, key in enumerate(keys): for index, key in enumerate(keys):
temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:]) temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key) download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
info = { info = {
'id': '%s_part%02d' % (video_id, index), 'id': '%s_part%02d' % (video_id, index),
'url': download_url, 'url': download_url,
'uploader': None, 'uploader': None,
'upload_date': None, 'upload_date': None,
'title': video_title, 'title': video_title,
'ext': ext, 'ext': ext,
} }
files_info.append(info) files_info.append(info)
return files_info return files_info
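
# A standalone sketch of the seed shuffle performed by _get_file_ID_mix_string
# above: the server-supplied seed drives a tiny linear congruential step that
# deterministically permutes the charset, and _get_file_id then indexes into
# that permutation. (The seed value in the usage comment is invented.)
def _youku_mix_sketch(seed, source="abcdefghijklmnopqrstuvwxyz0123456789"):
    source = list(source)
    mixed = []
    seed = float(seed)
    for _ in range(len(source)):
        seed = (seed * 211 + 30031) % 65536              # same constants as above
        index = int(math.floor(seed / 65536 * len(source)))
        mixed.append(source.pop(index))                  # pop == append+remove, chars are unique
    return mixed
# ''.join(_youku_mix_sketch(1234)) -> a fixed permutation of the charset
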
class XNXXIE(InfoExtractor):
    """Information extractor for xnxx.com"""

    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
    IE_NAME = u'xnxx'
    VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'

    def report_webpage(self, video_id):
        """Report webpage download"""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction"""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(1).decode('utf-8')

        self.report_webpage(video_id)

        # Get webpage content
        try:
            webpage = compat_urllib_request.urlopen(url).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
            return

        result = re.search(self.VIDEO_URL_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video url')
            return
        video_url = compat_urllib_parse.unquote(result.group(1).decode('utf-8'))

        result = re.search(self.VIDEO_TITLE_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = result.group(1).decode('utf-8')

        result = re.search(self.VIDEO_THUMB_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
            return
        video_thumbnail = result.group(1).decode('utf-8')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': 'flv',
            'thumbnail': video_thumbnail,
            'description': None,
        }]
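
# The three class-level regexes above do all of the extraction work on the raw
# page body; a sketch against an invented page fragment:
#
#     page = 'flv_url=http%3A%2F%2Fcdn.example%2Fv.flv&amp;url_bigthumb=t.jpg&amp;'
#     re.search(XNXXIE.VIDEO_URL_RE, page).group(1)
#     # -> 'http%3A%2F%2Fcdn.example%2Fv.flv' (still percent-encoded;
#     #    compat_urllib_parse.unquote() above turns it into a plain URL)
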
class GooglePlusIE(InfoExtractor):
    """Information extractor for plus.google.com."""

    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:\w+/)*?(\d+)/posts/(\w+)'
    IE_NAME = u'plus.google'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_extract_entry(self, url):
        """Report downloading entry"""
        self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url.decode('utf-8'))

    def report_date(self, upload_date):
        """Report entry date"""
        self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)

    def report_uploader(self, uploader):
        """Report entry uploader"""
        self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader.decode('utf-8'))

    def report_title(self, video_title):
        """Report entry title"""
        self._downloader.to_screen(u'[plus.google] Title: %s' % video_title.decode('utf-8'))

    def report_extract_vid_page(self, video_page):
        """Report information extraction."""
        self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page.decode('utf-8'))

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        post_url = mobj.group(0)
        video_id = mobj.group(2)

        video_extension = 'flv'

        # Step 1, Retrieve post webpage to extract further information
        self.report_extract_entry(post_url)
        request = compat_urllib_request.Request(post_url)
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
            return

        # Extract update date
        upload_date = None
        pattern = 'title="Timestamp">(.*?)</a>'
        mobj = re.search(pattern, webpage)
        if mobj:
            upload_date = mobj.group(1)
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')
        self.report_date(upload_date)

        # Extract uploader
        uploader = None
        pattern = r'rel\="author".*?>(.*?)</a>'
        mobj = re.search(pattern, webpage)
        if mobj:
            uploader = mobj.group(1)
        self.report_uploader(uploader)

        # Extract title
        # Get the first line for title
        video_title = u'NA'
        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
        mobj = re.search(pattern, webpage)
        if mobj:
            video_title = mobj.group(1)
        self.report_title(video_title)

        # Step 2, Simulate clicking the image box to launch video
        pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
        mobj = re.search(pattern, webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video page URL')

        video_page = mobj.group(1)
        request = compat_urllib_request.Request(video_page)
        try:
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return
        self.report_extract_vid_page(video_page)

        # Extract video links of all sizes on the video page
        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            self._downloader.trouble(u'ERROR: unable to extract video links')

        # Sort in resolution
        links = sorted(mobj)

        # Choose the lowest of the sort, i.e. highest resolution
        video_url = links[-1]
        # Only get the url. The resolution part in the tuple has no use anymore
        video_url = video_url[-1]
        # Treat escaped \u0026 style hex
        video_url = unicode(video_url, "unicode_escape")

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url,
            'uploader': uploader.decode('utf-8'),
            'upload_date': upload_date.decode('utf-8'),
            'title': video_title.decode('utf-8'),
            'ext': video_extension.decode('utf-8'),
        }]
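
# The unicode_escape step above is needed because the googlevideo URLs are
# embedded in a JavaScript array with '&' written as the literal six characters
# \u0026; a sketch (Python 2, URL invented):
#
#     raw = 'http://redirector.googlevideo.com/videoplayback?id=x\\u0026itag=34'
#     unicode(raw, 'unicode_escape')
#     # -> u'http://redirector.googlevideo.com/videoplayback?id=x&itag=34'
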

View File

@@ -10,189 +10,189 @@ from utils import *
class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with
    an initial argument and then with the returned value of the previous
    PostProcessor.

    The chain will be stopped if one of them ever returns None or the end
    of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.
    """

    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        When this method returns None, the postprocessing chain is
        stopped. However, this method may return an information
        dictionary that will be passed to the next postprocessing
        object in the chain. It can be the one it received after
        changing some fields.

        In addition, this method may raise a PostProcessingError
        exception that will be taken into account by the downloader
        it was called from.
        """
        return information # by default, do nothing
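
# A sketch of a custom PP built on the run() contract documented above: return
# the (possibly updated) info dict to keep the chain going, or None to stop it.
# The renaming behaviour here is invented purely for illustration.
class ExamplePrefixPP(PostProcessor):
    """Example only: prefix the downloaded file's name with 'done-'."""
    def run(self, information):
        old_path = information['filepath']
        new_path = os.path.join(os.path.dirname(old_path),
                                'done-' + os.path.basename(old_path))
        os.rename(old_path, new_path)
        information['filepath'] = new_path   # the next PP in the chain sees this path
        return information
# It would be attached like any other PP, e.g. fd.add_post_processor(ExamplePrefixPP()).
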
class AudioConversionError(BaseException):
    def __init__(self, message):
        self.message = message

class FFmpegExtractAudioPP(PostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False):
        PostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._keepvideo = keepvideo
        self._exes = self.detect_executables()

    @staticmethod
    def detect_executables():
        def executable(exe):
            try:
                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            except OSError:
                return False
            return exe
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((program, executable(program)) for program in programs)

    def get_audio_codec(self, path):
        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
            handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        audio_codec = None
        for line in output.split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None
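
    # get_audio_codec() relies on the fact that `-show_streams` prints the
    # codec_name= line before the codec_type= line within each stream block,
    # e.g. (abridged probe output, values illustrative):
    #
    #     [STREAM]
    #     index=1
    #     codec_name=aac
    #     codec_type=audio
    #     [/STREAM]
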
    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
               + acodec_opts + more_opts +
               ['--', encodeFilename(out_path)])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            msg = stderr.strip().split('\n')[-1]
            raise AudioConversionError(msg)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
            return None

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if self._preferredcodec == 'm4a' and filecodec == 'aac':
                # Lossless, but in another container
                acodec = 'copy'
                extension = self._preferredcodec
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                    else:
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                if int(self._preferredquality) < 10:
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                else:
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension
        self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
        try:
            self.run_ffmpeg(path, new_path, acodec, more_opts)
        except:
            etype, e, tb = sys.exc_info()
            if isinstance(e, AudioConversionError):
                self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
            else:
                self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
            return None

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except:
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

        if not self._keepvideo:
            try:
                os.remove(encodeFilename(path))
            except (IOError, OSError):
                self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
                return None

        information['filepath'] = new_path
        return information
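
# A sketch of how this PP is wired up (this mirrors what _real_main() in
# __main__ does with the --extract-audio options; `fd` is an already-created
# FileDownloader):
#
#     pp = FFmpegExtractAudioPP(preferredcodec='mp3', preferredquality='5',
#                               keepvideo=True)
#     fd.add_post_processor(pp)
#     # after each successful download, fd calls pp.run(info_dict) with
#     # info_dict['filepath'] pointing at the downloaded video
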

View File

@@ -4,22 +4,22 @@
from __future__ import with_statement

__authors__ = (
    'Ricardo Garcia Gonzalez',
    'Danny Colligan',
    'Benjamin Johnson',
    'Vasyl\' Vavrychuk',
    'Witold Baryluk',
    'Paweł Paprota',
    'Gergely Imreh',
    'Rogério Brito',
    'Philipp Hagemeister',
    'Sören Schulze',
    'Kevin Ngo',
    'Ori Avtalion',
    'shizeeg',
    'Filippo Valsorda',
    'Christian Albrecht',
    )

__license__ = 'Public Domain'
__version__ = '2012.11.29'
@@ -45,529 +45,529 @@ from InfoExtractors import *
from PostProcessor import *

def updateSelf(downloader, filename):
    ''' Update the program file with the latest version from the repository '''
    # Note: downloader only used for options
    if not os.access(filename, os.W_OK):
        sys.exit('ERROR: no write permissions on %s' % filename)

    downloader.to_screen(u'Updating to latest version...')

    urlv = compat_urllib_request.urlopen(UPDATE_URL_VERSION)
    newversion = urlv.read().strip()
    if newversion == __version__:
        downloader.to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
        return
    urlv.close()

    if hasattr(sys, "frozen"): #py2exe
        exe = os.path.abspath(filename)
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            sys.exit('ERROR: no write permissions on %s' % directory)

        try:
            urlh = compat_urllib_request.urlopen(UPDATE_URL_EXE)
            newcontent = urlh.read()
            urlh.close()
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            sys.exit('ERROR: unable to download latest version')

        try:
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            b = open(bat, 'w')
            b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" % (exe, exe, bat))
            b.close()
            os.startfile(bat)
        except (IOError, OSError) as err:
            sys.exit('ERROR: unable to overwrite current version')
    else:
        try:
            urlh = compat_urllib_request.urlopen(UPDATE_URL)
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            sys.exit('ERROR: unable to download latest version')

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            sys.exit('ERROR: unable to overwrite current version')

        downloader.to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
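
# The .bat indirection in the py2exe branch above exists because Windows will
# not let a running .exe overwrite itself; the `ping 127.0.0.1 -n 5` line is
# simply a sleep so the old process can exit before the move.
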
def parseOpts():
    def _readOptions(filename_bytes):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return [] # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res
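
    # Because every line goes through shlex.split(..., comments=True), an
    # options file holds ordinary command-line flags with shell-style quoting
    # and # comments; a hypothetical ~/.config/youtube-dl.conf might read:
    #
    #     --restrict-filenames
    #     -o "%(title)s-%(id)s.%(ext)s"   # quoted template survives splitting
    #     --audio-format mp3
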
    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value(): opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _find_term_columns():
        columns = os.environ.get('COLUMNS', None)
        if columns:
            return int(columns)

        try:
            sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = sp.communicate()
            return int(out.split()[1])
        except:
            pass
        return None
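
    # `stty size` prints "rows columns" (e.g. "24 80"), so out.split()[1]
    # above is the terminal width.
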
    max_width = 80
    max_help_position = 80

    # No need to wrap help messages if we're on a wide console
    columns = _find_term_columns()
    if columns: max_width = columns

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version' : __version__,
        'formatter' : fmt,
        'usage' : '%prog [options] url [url...]',
        'conflict_handler' : 'resolve',
    }

    parser = optparse.OptionParser(**kw)

    # option groups
    general = optparse.OptionGroup(parser, 'General Options')
    selection = optparse.OptionGroup(parser, 'Video Selection')
    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    video_format = optparse.OptionGroup(parser, 'Video Format Options')
    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')

    general.add_option('-h', '--help',
            action='help', help='print this help text and exit')
    general.add_option('-v', '--version',
            action='version', help='print program version and exit')
    general.add_option('-U', '--update',
            action='store_true', dest='update_self', help='update this program to latest version')
    general.add_option('-i', '--ignore-errors',
            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
    general.add_option('-r', '--rate-limit',
            dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
    general.add_option('-R', '--retries',
            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
    general.add_option('--buffer-size',
            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
    general.add_option('--no-resize-buffer',
            action='store_true', dest='noresizebuffer',
            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
    general.add_option('--dump-user-agent',
            action='store_true', dest='dump_user_agent',
            help='display the current browser identification', default=False)
    general.add_option('--user-agent',
            dest='user_agent', help='specify a custom user agent', metavar='UA')
    general.add_option('--list-extractors',
            action='store_true', dest='list_extractors',
            help='List all supported extractors and the URLs they would handle', default=False)

    selection.add_option('--playlist-start',
            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
    selection.add_option('--playlist-end',
            dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX', help='download only matching titles (regex or caseless sub-string)')
    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX', help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)

    authentication.add_option('-u', '--username',
            dest='username', metavar='USERNAME', help='account username')
    authentication.add_option('-p', '--password',
            dest='password', metavar='PASSWORD', help='account password')
    authentication.add_option('-n', '--netrc',
            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)

    video_format.add_option('-f', '--format',
            action='store', dest='format', metavar='FORMAT', help='video format code')
    video_format.add_option('--all-formats',
            action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
    video_format.add_option('--max-quality',
            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
    video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
    video_format.add_option('--write-srt',
            action='store_true', dest='writesubtitles',
            help='write video closed captions to a .srt file (currently youtube only)', default=False)
    video_format.add_option('--srt-lang',
            action='store', dest='subtitleslang', metavar='LANG',
            help='language of the closed captions to download (optional) use IETF language tags like \'en\'')

    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option('-s', '--simulate',
            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
            action='store_true', dest='skip_download', help='do not download the video', default=False)
    verbosity.add_option('-g', '--get-url',
            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
    verbosity.add_option('-e', '--get-title',
            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
    verbosity.add_option('--get-thumbnail',
            action='store_true', dest='getthumbnail',
            help='simulate, quiet but print thumbnail URL', default=False)
    verbosity.add_option('--get-description',
            action='store_true', dest='getdescription',
            help='simulate, quiet but print video description', default=False)
    verbosity.add_option('--get-filename',
            action='store_true', dest='getfilename',
            help='simulate, quiet but print output filename', default=False)
    verbosity.add_option('--get-format',
            action='store_true', dest='getformat',
            help='simulate, quiet but print output format', default=False)
    verbosity.add_option('--no-progress',
            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
            action='store_true', dest='consoletitle',
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)

    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='use title in file name', default=False)
    filesystem.add_option('--id',
            action='store_true', dest='useid', help='use video ID in file name', default=False)
    filesystem.add_option('-l', '--literal',
            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-A', '--auto-number',
            action='store_true', dest='autonumber',
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout.')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-a', '--batch-file',
            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('-w', '--no-overwrites',
            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
            action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
    filesystem.add_option('--no-continue',
            action='store_false', dest='continue_dl',
            help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--cookies',
            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option('--no-part',
            action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
            action='store_false', dest='updatetime',
            help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
            action='store_true', dest='writedescription',
            help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
            action='store_true', dest='writeinfojson',
            help='write video metadata to a .info.json file', default=False)

    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')

    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(video_format)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
    if xdg_config_home:
        userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
    else:
        userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
    argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
    opts, args = parser.parse_args(argv)

    return parser, opts, args
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [
        YoutubePlaylistIE(),
        YoutubeChannelIE(),
        YoutubeUserIE(),
        YoutubeSearchIE(),
        YoutubeIE(),
        MetacafeIE(),
        DailymotionIE(),
        GoogleIE(),
        GoogleSearchIE(),
        PhotobucketIE(),
        YahooIE(),
        YahooSearchIE(),
        DepositFilesIE(),
        FacebookIE(),
        BlipTVUserIE(),
        BlipTVIE(),
        VimeoIE(),
        MyVideoIE(),
        ComedyCentralIE(),
        EscapistIE(),
        CollegeHumorIE(),
        XVideosIE(),
        SoundcloudIE(),
        InfoQIE(),
        MixcloudIE(),
        StanfordOpenClassroomIE(),
        MTVIE(),
        YoukuIE(),
        XNXXIE(),
        GooglePlusIE(),
        ArteTvIE(),
        GenericIE()
    ]
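
# Dispatch is first-match: the downloader walks this list in order and hands a
# URL to the first extractor whose suitable() returns True, which is why the
# specialised YouTube extractors precede YoutubeIE and GenericIE comes last.
# A condensed sketch of that selection loop (the real one lives in
# FileDownloader.download()):
#
#     def pick_extractor(url, extractors):
#         for ie in extractors:
#             if ie.suitable(url):
#                 return ie
#         return None
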

def _real_main():
    parser, opts, args = parseOpts()

    # Open appropriate CookieJar
    if opts.cookiefile is None:
        jar = compat_cookiejar.CookieJar()
    else:
        try:
            jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
            if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
                jar.load()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to open cookie file')

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Dump user agent
    if opts.dump_user_agent:
        print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batchurls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = open(opts.batchfile, 'r')
            batchurls = batchfd.readlines()
            batchurls = [x.strip() for x in batchurls]
            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batchurls + args
    all_urls = [url.strip() for url in all_urls]

    # General configuration
    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
    proxy_handler = compat_urllib_request.ProxyHandler()
    opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
    compat_urllib_request.install_opener(opener)
    socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

    extractors = gen_extractors()

    if opts.list_extractors:
        for ie in extractors:
            print(ie.IE_NAME)
            # Use list comprehensions (not filter()) so the results can be
            # iterated more than once on Python 3 as well.
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            all_urls = [url for url in all_urls if url not in matchedUrls]
            for mu in matchedUrls:
                print(u'  ' + mu)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u'account username missing')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = getpass.getpass(u'Type account password and press return:')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError) as err:
            parser.error(u'invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    try:
        opts.playliststart = int(opts.playliststart)
        if opts.playliststart <= 0:
            raise ValueError(u'Playlist start must be positive')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist start number specified')
    try:
        opts.playlistend = int(opts.playlistend)
        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
            raise ValueError(u'Playlist end must be greater than playlist start')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist end number specified')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')

    # File downloader
    fd = FileDownloader({
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'simulate': opts.simulate,
        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
            or (opts.useid and u'%(id)s.%(ext)s')
            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
            or u'%(id)s.%(ext)s'),
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeinfojson': opts.writeinfojson,
        'writesubtitles': opts.writesubtitles,
        'subtitleslang': opts.subtitleslang,
        'matchtitle': opts.matchtitle,
        'rejecttitle': opts.rejecttitle,
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        })

    if opts.verbose:
        fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))

    for extractor in extractors:
        fd.add_info_extractor(extractor)

    # PostProcessors
    if opts.extractaudio:
        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo))

    # Update version
    if opts.update_self:
        updateSelf(fd, sys.argv[0])

    # Maybe do nothing
    if len(all_urls) < 1:
        if not opts.update_self:
            parser.error(u'you must provide at least one URL')
        else:
            sys.exit()

    try:
        retcode = fd.download(all_urls)
    except MaxDownloadsReached:
        fd.to_screen(u'--max-download limit reached, aborting.')
        retcode = 101

    # Dump cookie jar if requested
    if opts.cookiefile is not None:
        try:
            jar.save()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to save cookie jar')

    sys.exit(retcode)
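
# A hedged sketch of the "cond and value or default" chain used for
# 'outtmpl' above: each clause contributes its template only when its
# flag is truthy, and the first truthy result wins. The flags here are
# hypothetical stand-ins for the opts.* attributes.
def _example_pick_outtmpl(usetitle=False, useid=False, autonumber=False):
    return ((usetitle and autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
        or (usetitle and u'%(title)s-%(id)s.%(ext)s')
        or (useid and u'%(id)s.%(ext)s')
        or (autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
        or u'%(id)s.%(ext)s')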

def main():
    try:
        _real_main()
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')

@@ -4,4 +4,4 @@
import __init__

if __name__ == '__main__':
    __init__.main()

@@ -12,490 +12,490 @@ import email.utils
import json

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

try:
    import urllib.error as compat_urllib_error
except ImportError: # Python 2
    import urllib2 as compat_urllib_error

try:
    import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
    import urllib as compat_urllib_parse

try:
    import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
    import cookielib as compat_cookiejar

try:
    import html.entities as compat_html_entities
except ImportError: # Python 2
    import htmlentitydefs as compat_html_entities

try:
    import html.parser as compat_html_parser
except ImportError: # Python 2
    import HTMLParser as compat_html_parser

try:
    import http.client as compat_http_client
except ImportError: # Python 2
    import httplib as compat_http_client

try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _unquote(string, encoding='utf-8', errors='replace'):
        if string == '':
            return string
        res = string.split('%')
        if len(res) == 1:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string

    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, unicode
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError("bad query field: %r" % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = _unquote(name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = _unquote(value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
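
# A hedged usage sketch (not part of the original module), matching the
# cpython 3 semantics the backport above reproduces: duplicate keys
# accumulate, and blank values are dropped unless keep_blank_values is set.
def _example_compat_parse_qs():
    assert compat_parse_qs('a=1&a=2&b=') == {'a': ['1', '2']}
    assert compat_parse_qs('a=1&b=', keep_blank_values=True) == {'a': ['1'], 'b': ['']}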

try:
    compat_str = unicode # Python 2
except NameError:
    compat_str = str

try:
    compat_chr = unichr # Python 2
except NameError:
    compat_chr = chr

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        u'TEST'.encode(pref)
    except:
        pref = 'UTF-8'

    return pref


if sys.version_info < (3,0):
    def compat_print(s):
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        assert type(s) == type(u'')
        print(s)

def htmlentity_transform(matchobj):
    """Transforms an HTML entity to a character.

    This function receives a match object and is intended to be used with
    the re.sub() function.
    """
    entity = matchobj.group(1)

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(u'(?u)#(x?\\d+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith(u'x'):
            base = 16
            numstr = u'0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return (u'&%s;' % entity)

compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix

class IDParser(compat_html_parser.HTMLParser):
    """Modified HTMLParser that isolates a tag with the specified id"""
    def __init__(self, id):
        self.id = id
        self.result = None
        self.started = False
        self.depth = {}
        self.html = None
        self.watch_startpos = False
        self.error_count = 0
        compat_html_parser.HTMLParser.__init__(self)

    def error(self, message):
        if self.error_count > 10 or self.started:
            raise compat_html_parser.HTMLParseError(message, self.getpos())
        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
        self.error_count += 1
        self.goahead(1)

    def loads(self, html):
        self.html = html
        self.feed(html)
        self.close()

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.started:
            self.find_startpos(None)
        if 'id' in attrs and attrs['id'] == self.id:
            self.result = [tag]
            self.started = True
            self.watch_startpos = True
        if self.started:
            if not tag in self.depth: self.depth[tag] = 0
            self.depth[tag] += 1

    def handle_endtag(self, tag):
        if self.started:
            if tag in self.depth: self.depth[tag] -= 1
            if self.depth[self.result[0]] == 0:
                self.started = False
                self.result.append(self.getpos())

    def find_startpos(self, x):
        """Needed to put the start position of the result (self.result[1])
        after the opening tag with the requested id"""
        if self.watch_startpos:
            self.watch_startpos = False
            self.result.append(self.getpos())
    handle_entityref = handle_charref = handle_data = handle_comment = \
        handle_decl = handle_pi = unknown_decl = find_startpos

    def get_result(self):
        if self.result is None:
            return None
        if len(self.result) != 3:
            return None
        lines = self.html.split('\n')
        lines = lines[self.result[1][0]-1:self.result[2][0]]
        lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        lines[-1] = lines[-1][:self.result[2][1]]
        return '\n'.join(lines).strip()

def get_element_by_id(id, html):
    """Return the content of the tag with the specified id in the passed HTML document"""
    parser = IDParser(id)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        pass
    return parser.get_result()
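
# A hedged usage sketch (not in the original module): extracts the body
# of the element carrying a given id, or None if it is absent.
def _example_get_element_by_id():
    page = u'<html><body><div id="player">content</div></body></html>'
    return get_element_by_id('player', page)  # expected: u'content'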

def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub('\s*<\s*br\s*/?\s*>\s*', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html
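
# A hedged usage sketch (not in the original module): <br> tags become
# newlines, remaining markup is stripped, and entities are decoded.
def _example_clean_html():
    assert clean_html(u'one<br/>two &amp; <b>three</b>') == u'one\ntwo & three'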

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # In case of error, try to remove win32 forbidden chars
        filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)

        # An exception here should be caught in the caller
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
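
# A hedged usage sketch (not in the original module): if the exact name
# cannot be opened, win32-forbidden characters are replaced with '#' and
# the name that was actually used is returned alongside the stream.
def _example_sanitize_open():
    stream, used_name = sanitize_open(u'clip.part', 'wb')
    stream.close()
    return used_name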

def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
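
# A hedged usage sketch (not in the original module): RFC 2822 dates,
# e.g. from an HTTP Last-Modified header, become Unix timestamps;
# anything unparseable yields None.
def _example_timeconvert():
    return timeconvert('Wed, 28 Nov 2012 02:04:46 +0100')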

def sanitize_filename(s, restricted=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    result = u''.join(map(replace_insane, s))
    while '__' in result:
        result = result.replace('__', '_')
    result = result.strip('_')
    # Common case of "Foreign band name - English song title"
    if restricted and result.startswith('-_'):
        result = result[2:]
    if not result:
        result = '_'
    return result
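
# A hedged usage sketch (not in the original module):
def _example_sanitize_filename():
    assert sanitize_filename(u'AC/DC: Live!') == u'AC_DC - Live!'
    # restricted mode also turns spaces and '!' into '_'; runs of '_'
    # collapse and leading/trailing underscores are stripped:
    assert sanitize_filename(u'AC/DC: Live!', restricted=True) == u'AC_DC_-_Live'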

def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
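
# A hedged usage sketch (not in the original module): order-preserving
# deduplication, first occurrence wins.
def _example_orderedSet():
    assert orderedSet([3, 1, 3, 2, 1]) == [3, 1, 2]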

def unescapeHTML(s):
    """
    @param s a string
    """
    assert type(s) == type(u'')

    result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
    return result
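
# A hedged usage sketch (not in the original module): named, decimal and
# hexadecimal entities are all resolved via htmlentity_transform() above.
def _example_unescapeHTML():
    assert unescapeHTML(u'&amp; &#38; &#x26;') == u'& & &'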

def encodeFilename(s):
    """
    @param s The name of the file
    """
    assert type(s) == type(u'')

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
        return s
    else:
        return s.encode(sys.getfilesystemencoding(), 'ignore')

class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """
    pass


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    pass


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """
    # Both in bytes
    downloaded = None
    expected = None

    def __init__(self, downloaded, expected):
        self.downloaded = downloaded
        self.expected = expected


class Trouble(Exception):
    """Trouble helper exception

    This is an exception to be handled with
    FileDownloader.trouble
    """

class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        for h in std_headers:
            if h in req.headers:
                del req.headers[h]
            req.add_header(h, std_headers[h])
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']
        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r')
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp
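
    # A hedged usage sketch (comment-only, hypothetical, not from the
    # original source): installing the handler and opting one request out
    # of compression via the magic header stripped in http_request():
    #
    #   opener = compat_urllib_request.build_opener(YoutubeDLHandler())
    #   req = compat_urllib_request.Request('http://example.com/')
    #   req.add_header('Youtubedl-No-Compression', '1')
    #   page = opener.open(req).read()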