1
0
mirror of https://code.hackerspace.pl/q3k/youtube-dl synced 2025-03-16 11:43:02 +00:00

Merge branch 'master' into extract_info_rewrite

This commit is contained in:
Jaime Marquínez Ferrándiz 2013-04-05 12:39:51 +02:00
commit 14294236bf
11 changed files with 550 additions and 218 deletions

2
.gitignore vendored
View File

@ -17,4 +17,4 @@ youtube-dl.tar.gz
.coverage .coverage
cover/ cover/
updates_key.pem updates_key.pem
*.egg-info *.egg-info

View File

@ -18,7 +18,7 @@ which means you can modify it, redistribute it or use it however you like.
--version print program version and exit --version print program version and exit
-U, --update update this program to latest version -U, --update update this program to latest version
-i, --ignore-errors continue on download errors -i, --ignore-errors continue on download errors
-r, --rate-limit LIMIT download rate limit (e.g. 50k or 44.6m) -r, --rate-limit LIMIT maximum download rate (e.g. 50k or 44.6m)
-R, --retries RETRIES number of retries (default is 10) -R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16k) (default --buffer-size SIZE size of download buffer (e.g. 1024 or 16k) (default
is 1024) is 1024)
@ -97,10 +97,16 @@ which means you can modify it, redistribute it or use it however you like.
requested requested
--max-quality FORMAT highest quality format to download --max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats (currently youtube only) -F, --list-formats list all available formats (currently youtube only)
--write-srt write video closed captions to a .srt file --write-sub write subtitle file (currently youtube only)
--only-sub downloads only the subtitles (no video)
--all-subs downloads all the available subtitles of the video
(currently youtube only) (currently youtube only)
--srt-lang LANG language of the closed captions to download --list-subs lists all available subtitles for the video
(optional) use IETF language tags like 'en' (currently youtube only)
--sub-format LANG subtitle format [srt/sbv] (default=srt) (currently
youtube only)
--sub-lang LANG language of the subtitles to download (optional)
use IETF language tags like 'en'
## Authentication Options: ## Authentication Options:
-u, --username USERNAME account username -u, --username USERNAME account username

View File

@ -0,0 +1,57 @@
#!/usr/bin/env python3
import datetime
import textwrap
import json
atom_template=textwrap.dedent("""\
<?xml version='1.0' encoding='utf-8'?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
<atom:title>youtube-dl releases</atom:title>
<atom:id>youtube-dl-updates-feed</atom:id>
<atom:updated>@TIMESTAMP@</atom:updated>
@ENTRIES@
</atom:feed>""")
entry_template=textwrap.dedent("""
<atom:entry>
<atom:id>youtube-dl-@VERSION@</atom:id>
<atom:title>New version @VERSION@</atom:title>
<atom:link href="http://rg3.github.com/youtube-dl" />
<atom:content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
</div>
</atom:content>
<atom:author>
<atom:name>The youtube-dl maintainers</atom:name>
</atom:author>
<atom:updated>@TIMESTAMP@</atom:updated>
</atom:entry>
""")
now = datetime.datetime.now()
now_iso = now.isoformat()
atom_template = atom_template.replace('@TIMESTAMP@',now_iso)
entries=[]
versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()
for v in versions:
entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-'))
entry = entry.replace('@VERSION@',v)
entries.append(entry)
entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)
with open('update/releases.atom','w',encoding='utf-8') as atom_file:
atom_file.write(atom_template)

View File

@ -69,6 +69,7 @@ ROOT=$(pwd)
ORIGIN_URL=$(git config --get remote.origin.url) ORIGIN_URL=$(git config --get remote.origin.url)
cd build/gh-pages cd build/gh-pages
"$ROOT/devscripts/gh-pages/add-version.py" $version "$ROOT/devscripts/gh-pages/add-version.py" $version
"$ROOT/devscripts/gh-pages/update-feed.py"
"$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem" "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
"$ROOT/devscripts/gh-pages/generate-download.py" "$ROOT/devscripts/gh-pages/generate-download.py"
"$ROOT/devscripts/gh-pages/update-copyright.py" "$ROOT/devscripts/gh-pages/update-copyright.py"

View File

@ -20,6 +20,8 @@ from youtube_dl.utils import *
DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json') DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json") PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
RETRIES = 3
# General configuration (from __init__, not very elegant...) # General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar() jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar) cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
@ -79,9 +81,8 @@ def generator(test_case):
params.update(test_case.get('params', {})) params.update(test_case.get('params', {}))
fd = FileDownloader(params) fd = FileDownloader(params)
fd.add_info_extractor(ie()) for ie in youtube_dl.InfoExtractors.gen_extractors():
for ien in test_case.get('add_ie', []): fd.add_info_extractor(ie)
fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
finished_hook_called = set() finished_hook_called = set()
def _hook(status): def _hook(status):
if status['status'] == 'finished': if status['status'] == 'finished':
@ -94,7 +95,19 @@ def generator(test_case):
_try_rm(tc['file'] + '.part') _try_rm(tc['file'] + '.part')
_try_rm(tc['file'] + '.info.json') _try_rm(tc['file'] + '.info.json')
try: try:
fd.download([test_case['url']]) for retry in range(1, RETRIES + 1):
try:
fd.download([test_case['url']])
except (DownloadError, ExtractorError) as err:
if retry == RETRIES: raise
# Check if the exception is not a network related one
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
raise
print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
else:
break
for tc in test_cases: for tc in test_cases:
if not test_case.get('params', {}).get('skip_download', False): if not test_case.get('params', {}).get('skip_download', False):

View File

@ -308,5 +308,25 @@
"info_dict": { "info_dict": {
"title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv" "title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
} }
},
{
"name": "LiveLeak",
"md5": "0813c2430bea7a46bf13acf3406992f4",
"url": "http://www.liveleak.com/view?i=757_1364311680",
"file": "757_1364311680.mp4",
"info_dict": {
"title": "Most unlucky car accident",
"description": "extremely bad day for this guy..!",
"uploader": "ljfriel2"
}
},
{
"name": "WorldStarHipHop",
"url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
"file": "wshh6a7q1ny0G34ZwuIO.mp4",
"md5": "9d04de741161603bf7071bbf4e883186",
"info_dict": {
"title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick! "
}
} }
] ]

Binary file not shown.

View File

@ -231,11 +231,21 @@ class FileDownloader(object):
self.to_stderr(message) self.to_stderr(message)
if self.params.get('verbose'): if self.params.get('verbose'):
if tb is None: if tb is None:
tb_data = traceback.format_list(traceback.extract_stack()) if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = u''.join(tb_data) tb = u''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = u''.join(tb_data)
self.to_stderr(tb) self.to_stderr(tb)
if not self.params.get('ignoreerrors', False): if not self.params.get('ignoreerrors', False):
raise DownloadError(message) if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1 self._download_retcode = 1
def report_warning(self, message): def report_warning(self, message):
@ -250,6 +260,18 @@ class FileDownloader(object):
warning_message=u'%s %s' % (_msg_header,message) warning_message=u'%s %s' % (_msg_header,message)
self.to_stderr(warning_message) self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if sys.stderr.isatty():
_msg_header = u'\033[0;31mERROR:\033[0m'
else:
_msg_header = u'ERROR:'
error_message = u'%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def slow_down(self, start_time, byte_counter): def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit.""" """Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None) rate_limit = self.params.get('ratelimit', None)
@ -281,7 +303,7 @@ class FileDownloader(object):
return return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err: except (IOError, OSError) as err:
self.trouble(u'ERROR: unable to rename file') self.report_error(u'unable to rename file')
def try_utime(self, filename, last_modified_hdr): def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file.""" """Try to set the last-modified time of the given file."""
@ -519,7 +541,7 @@ class FileDownloader(object):
if dn != '' and not os.path.exists(dn): # dn is already encoded if dn != '' and not os.path.exists(dn): # dn is already encoded
os.makedirs(dn) os.makedirs(dn)
except (OSError, IOError) as err: except (OSError, IOError) as err:
self.trouble(u'ERROR: unable to create directory ' + compat_str(err)) self.report_error(u'unable to create directory ' + compat_str(err))
return return
if self.params.get('writedescription', False): if self.params.get('writedescription', False):
@ -529,7 +551,7 @@ class FileDownloader(object):
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description']) descfile.write(info_dict['description'])
except (OSError, IOError): except (OSError, IOError):
self.trouble(u'ERROR: Cannot write description file ' + descfn) self.report_error(u'Cannot write description file ' + descfn)
return return
if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
@ -538,14 +560,17 @@ class FileDownloader(object):
subtitle = info_dict['subtitles'][0] subtitle = info_dict['subtitles'][0]
(sub_error, sub_lang, sub) = subtitle (sub_error, sub_lang, sub) = subtitle
sub_format = self.params.get('subtitlesformat') sub_format = self.params.get('subtitlesformat')
try: if sub_error:
sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format self.report_warning("Some error while getting the subtitles")
self.report_writesubtitles(sub_filename) else:
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: try:
subfile.write(sub) sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
except (OSError, IOError): self.report_writesubtitles(sub_filename)
self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
return subfile.write(sub)
except (OSError, IOError):
self.report_error(u'Cannot write subtitles file ' + descfn)
return
if self.params.get('onlysubtitles', False): if self.params.get('onlysubtitles', False):
return return
@ -554,14 +579,17 @@ class FileDownloader(object):
sub_format = self.params.get('subtitlesformat') sub_format = self.params.get('subtitlesformat')
for subtitle in subtitles: for subtitle in subtitles:
(sub_error, sub_lang, sub) = subtitle (sub_error, sub_lang, sub) = subtitle
try: if sub_error:
sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format self.report_warning("Some error while getting the subtitles")
self.report_writesubtitles(sub_filename) else:
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: try:
subfile.write(sub) sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
except (OSError, IOError): self.report_writesubtitles(sub_filename)
self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
return subfile.write(sub)
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
return
if self.params.get('onlysubtitles', False): if self.params.get('onlysubtitles', False):
return return
@ -572,7 +600,7 @@ class FileDownloader(object):
json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle']) json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
write_json_file(json_info_dict, encodeFilename(infofn)) write_json_file(json_info_dict, encodeFilename(infofn))
except (OSError, IOError): except (OSError, IOError):
self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn) self.report_error(u'Cannot write metadata to JSON file ' + infofn)
return return
if not self.params.get('skip_download', False): if not self.params.get('skip_download', False):
@ -584,17 +612,17 @@ class FileDownloader(object):
except (OSError, IOError) as err: except (OSError, IOError) as err:
raise UnavailableVideoError() raise UnavailableVideoError()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.trouble(u'ERROR: unable to download video data: %s' % str(err)) self.report_error(u'unable to download video data: %s' % str(err))
return return
except (ContentTooShortError, ) as err: except (ContentTooShortError, ) as err:
self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return return
if success: if success:
try: try:
self.post_process(filename, info_dict) self.post_process(filename, info_dict)
except (PostProcessingError) as err: except (PostProcessingError) as err:
self.trouble(u'ERROR: postprocessing: %s' % str(err)) self.report_error(u'postprocessing: %s' % str(err))
return return
def download(self, url_list): def download(self, url_list):
@ -611,6 +639,9 @@ class FileDownloader(object):
self.process_info(video) self.process_info(video)
except UnavailableVideoError: except UnavailableVideoError:
self.trouble(u'\nERROR: unable to download video') self.trouble(u'\nERROR: unable to download video')
except MaxDownloadsReached:
self.to_screen(u'[info] Maximum number of downloaded files reached.')
raise
return self._download_retcode return self._download_retcode
@ -645,7 +676,7 @@ class FileDownloader(object):
try: try:
subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
except (OSError, IOError): except (OSError, IOError):
self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run') self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
return False return False
# Download using rtmpdump. rtmpdump returns exit code 2 when # Download using rtmpdump. rtmpdump returns exit code 2 when
@ -690,7 +721,8 @@ class FileDownloader(object):
}) })
return True return True
else: else:
self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval) self.to_stderr(u"\n")
self.report_error(u'rtmpdump exited with code %d' % retval)
return False return False
def _do_download(self, filename, info_dict): def _do_download(self, filename, info_dict):
@ -790,7 +822,7 @@ class FileDownloader(object):
self.report_retry(count, retries) self.report_retry(count, retries)
if count > retries: if count > retries:
self.trouble(u'ERROR: giving up after %s retries' % retries) self.report_error(u'giving up after %s retries' % retries)
return False return False
data_len = data.info().get('Content-length', None) data_len = data.info().get('Content-length', None)
@ -826,12 +858,13 @@ class FileDownloader(object):
filename = self.undo_temp_name(tmpfilename) filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename) self.report_destination(filename)
except (OSError, IOError) as err: except (OSError, IOError) as err:
self.trouble(u'ERROR: unable to open for writing: %s' % str(err)) self.report_error(u'unable to open for writing: %s' % str(err))
return False return False
try: try:
stream.write(data_block) stream.write(data_block)
except (IOError, OSError) as err: except (IOError, OSError) as err:
self.trouble(u'\nERROR: unable to write data: %s' % str(err)) self.to_stderr(u"\n")
self.report_error(u'unable to write data: %s' % str(err))
return False return False
if not self.params.get('noresizebuffer', False): if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block)) block_size = self.best_block_size(after - before, len(data_block))
@ -857,7 +890,8 @@ class FileDownloader(object):
self.slow_down(start, byte_counter - resume_len) self.slow_down(start, byte_counter - resume_len)
if stream is None: if stream is None:
self.trouble(u'\nERROR: Did not get any data blocks') self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
return False return False
stream.close() stream.close()
self.report_finish() self.report_finish()

View File

@ -275,11 +275,11 @@ class YoutubeIE(InfoExtractor):
try: try:
sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8') sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None) return (u'unable to download video subtitles: %s' % compat_str(err), None)
sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list) sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
if not sub_lang_list: if not sub_lang_list:
return (u'WARNING: video doesn\'t have subtitles', None) return (u'video doesn\'t have subtitles', None)
return sub_lang_list return sub_lang_list
def _list_available_subtitles(self, video_id): def _list_available_subtitles(self, video_id):
@ -287,6 +287,10 @@ class YoutubeIE(InfoExtractor):
self.report_video_subtitles_available(video_id, sub_lang_list) self.report_video_subtitles_available(video_id, sub_lang_list)
def _request_subtitle(self, sub_lang, sub_name, video_id, format): def _request_subtitle(self, sub_lang, sub_name, video_id, format):
"""
Return tuple:
(error_message, sub_lang, sub)
"""
self.report_video_subtitles_request(video_id, sub_lang, format) self.report_video_subtitles_request(video_id, sub_lang, format)
params = compat_urllib_parse.urlencode({ params = compat_urllib_parse.urlencode({
'lang': sub_lang, 'lang': sub_lang,
@ -298,14 +302,20 @@ class YoutubeIE(InfoExtractor):
try: try:
sub = compat_urllib_request.urlopen(url).read().decode('utf-8') sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None) return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
if not sub: if not sub:
return (u'WARNING: Did not fetch video subtitles', None) return (u'Did not fetch video subtitles', None, None)
return (None, sub_lang, sub) return (None, sub_lang, sub)
def _extract_subtitle(self, video_id): def _extract_subtitle(self, video_id):
"""
Return a list with a tuple:
[(error_message, sub_lang, sub)]
"""
sub_lang_list = self._get_available_subtitles(video_id) sub_lang_list = self._get_available_subtitles(video_id)
sub_format = self._downloader.params.get('subtitlesformat') sub_format = self._downloader.params.get('subtitlesformat')
if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
return [(sub_lang_list[0], None, None)]
if self._downloader.params.get('subtitleslang', False): if self._downloader.params.get('subtitleslang', False):
sub_lang = self._downloader.params.get('subtitleslang') sub_lang = self._downloader.params.get('subtitleslang')
elif 'en' in sub_lang_list: elif 'en' in sub_lang_list:
@ -313,7 +323,7 @@ class YoutubeIE(InfoExtractor):
else: else:
sub_lang = list(sub_lang_list.keys())[0] sub_lang = list(sub_lang_list.keys())[0]
if not sub_lang in sub_lang_list: if not sub_lang in sub_lang_list:
return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None) return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
return [subtitle] return [subtitle]
@ -321,6 +331,8 @@ class YoutubeIE(InfoExtractor):
def _extract_all_subtitles(self, video_id): def _extract_all_subtitles(self, video_id):
sub_lang_list = self._get_available_subtitles(video_id) sub_lang_list = self._get_available_subtitles(video_id)
sub_format = self._downloader.params.get('subtitlesformat') sub_format = self._downloader.params.get('subtitlesformat')
if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
return [(sub_lang_list[0], None, None)]
subtitles = [] subtitles = []
for sub_lang in sub_lang_list: for sub_lang in sub_lang_list:
subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
@ -433,13 +445,13 @@ class YoutubeIE(InfoExtractor):
self.report_age_confirmation() self.report_age_confirmation()
age_results = compat_urllib_request.urlopen(request).read().decode('utf-8') age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
return return
def _extract_id(self, url): def _extract_id(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE) mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(2) video_id = mobj.group(2)
return video_id return video_id
@ -458,7 +470,7 @@ class YoutubeIE(InfoExtractor):
try: try:
video_webpage_bytes = compat_urllib_request.urlopen(request).read() video_webpage_bytes = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
return return
video_webpage = video_webpage_bytes.decode('utf-8', 'ignore') video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
@ -483,18 +495,18 @@ class YoutubeIE(InfoExtractor):
if 'token' in video_info: if 'token' in video_info:
break break
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
return return
if 'token' not in video_info: if 'token' not in video_info:
if 'reason' in video_info: if 'reason' in video_info:
self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0]) self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
else: else:
self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason') self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
return return
# Check for "rental" videos # Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
self._downloader.trouble(u'ERROR: "rental" videos not supported') self._downloader.report_error(u'"rental" videos not supported')
return return
# Start extracting information # Start extracting information
@ -502,7 +514,7 @@ class YoutubeIE(InfoExtractor):
# uploader # uploader
if 'author' not in video_info: if 'author' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract uploader name') self._downloader.report_error(u'unable to extract uploader name')
return return
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0]) video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
@ -512,17 +524,17 @@ class YoutubeIE(InfoExtractor):
if mobj is not None: if mobj is not None:
video_uploader_id = mobj.group(1) video_uploader_id = mobj.group(1)
else: else:
self._downloader.trouble(u'WARNING: unable to extract uploader nickname') self._downloader.report_warning(u'unable to extract uploader nickname')
# title # title
if 'title' not in video_info: if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.report_error(u'unable to extract video title')
return return
video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
# thumbnail image # thumbnail image
if 'thumbnail_url' not in video_info: if 'thumbnail_url' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video thumbnail') self._downloader.report_warning(u'unable to extract video thumbnail')
video_thumbnail = '' video_thumbnail = ''
else: # don't panic if we can't find it else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
@ -554,21 +566,21 @@ class YoutubeIE(InfoExtractor):
if video_subtitles: if video_subtitles:
(sub_error, sub_lang, sub) = video_subtitles[0] (sub_error, sub_lang, sub) = video_subtitles[0]
if sub_error: if sub_error:
self._downloader.trouble(sub_error) self._downloader.report_error(sub_error)
if self._downloader.params.get('allsubtitles', False): if self._downloader.params.get('allsubtitles', False):
video_subtitles = self._extract_all_subtitles(video_id) video_subtitles = self._extract_all_subtitles(video_id)
for video_subtitle in video_subtitles: for video_subtitle in video_subtitles:
(sub_error, sub_lang, sub) = video_subtitle (sub_error, sub_lang, sub) = video_subtitle
if sub_error: if sub_error:
self._downloader.trouble(sub_error) self._downloader.report_error(sub_error)
if self._downloader.params.get('listsubtitles', False): if self._downloader.params.get('listsubtitles', False):
sub_lang_list = self._list_available_subtitles(video_id) sub_lang_list = self._list_available_subtitles(video_id)
return return
if 'length_seconds' not in video_info: if 'length_seconds' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video duration') self._downloader.report_warning(u'unable to extract video duration')
video_duration = '' video_duration = ''
else: else:
video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]) video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
@ -596,7 +608,7 @@ class YoutubeIE(InfoExtractor):
format_list = available_formats format_list = available_formats
existing_formats = [x for x in format_list if x in url_map] existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0: if len(existing_formats) == 0:
self._downloader.trouble(u'ERROR: no known formats available for video') self._downloader.report_error(u'no known formats available for video')
return return
if self._downloader.params.get('listformats', None): if self._downloader.params.get('listformats', None):
self._print_formats(existing_formats) self._print_formats(existing_formats)
@ -617,10 +629,10 @@ class YoutubeIE(InfoExtractor):
video_url_list = [(rf, url_map[rf])] video_url_list = [(rf, url_map[rf])]
break break
if video_url_list is None: if video_url_list is None:
self._downloader.trouble(u'ERROR: requested format not available') self._downloader.report_error(u'requested format not available')
return return
else: else:
self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info') self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
return return
results = [] results = []
@ -683,7 +695,7 @@ class MetacafeIE(InfoExtractor):
self.report_disclaimer() self.report_disclaimer()
disclaimer = compat_urllib_request.urlopen(request).read() disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err)) self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
return return
# Confirm age # Confirm age
@ -696,14 +708,14 @@ class MetacafeIE(InfoExtractor):
self.report_age_confirmation() self.report_age_confirmation()
disclaimer = compat_urllib_request.urlopen(request).read() disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
return return
def _real_extract(self, url): def _real_extract(self, url):
# Extract id and simplified title from URL # Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
@ -720,7 +732,7 @@ class MetacafeIE(InfoExtractor):
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable retrieve video webpage: %s' % compat_str(err))
return return
# Extract URL, uploader and title from webpage # Extract URL, uploader and title from webpage
@ -740,15 +752,15 @@ class MetacafeIE(InfoExtractor):
else: else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.report_error(u'unable to extract media URL')
return return
vardict = compat_parse_qs(mobj.group(1)) vardict = compat_parse_qs(mobj.group(1))
if 'mediaData' not in vardict: if 'mediaData' not in vardict:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.report_error(u'unable to extract media URL')
return return
mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0]) mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.report_error(u'unable to extract media URL')
return return
mediaURL = mobj.group(1).replace('\\/', '/') mediaURL = mobj.group(1).replace('\\/', '/')
video_extension = mediaURL[-3:] video_extension = mediaURL[-3:]
@ -756,13 +768,13 @@ class MetacafeIE(InfoExtractor):
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage) mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
video_title = mobj.group(1).decode('utf-8') video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'submitter=(.*?);', webpage) mobj = re.search(r'submitter=(.*?);', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname') self._downloader.report_error(u'unable to extract uploader nickname')
return return
video_uploader = mobj.group(1) video_uploader = mobj.group(1)
@ -794,7 +806,7 @@ class DailymotionIE(InfoExtractor):
# Extract id and simplified title from URL # Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(1).split('_')[0].split('?')[0] video_id = mobj.group(1).split('_')[0].split('?')[0]
@ -810,7 +822,7 @@ class DailymotionIE(InfoExtractor):
self.report_extraction(video_id) self.report_extraction(video_id)
mobj = re.search(r'\s*var flashvars = (.*)', webpage) mobj = re.search(r'\s*var flashvars = (.*)', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.report_error(u'unable to extract media URL')
return return
flashvars = compat_urllib_parse.unquote(mobj.group(1)) flashvars = compat_urllib_parse.unquote(mobj.group(1))
@ -820,12 +832,12 @@ class DailymotionIE(InfoExtractor):
self._downloader.to_screen(u'[dailymotion] Using %s' % key) self._downloader.to_screen(u'[dailymotion] Using %s' % key)
break break
else: else:
self._downloader.trouble(u'ERROR: unable to extract video URL') self._downloader.report_error(u'unable to extract video URL')
return return
mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars) mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video URL') self._downloader.report_error(u'unable to extract video URL')
return return
video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/') video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
@ -834,7 +846,7 @@ class DailymotionIE(InfoExtractor):
mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage) mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
video_title = unescapeHTML(mobj.group('title')) video_title = unescapeHTML(mobj.group('title'))
@ -844,7 +856,7 @@ class DailymotionIE(InfoExtractor):
# lookin for official user # lookin for official user
mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage) mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
if mobj_official is None: if mobj_official is None:
self._downloader.trouble(u'WARNING: unable to extract uploader nickname') self._downloader.report_warning(u'unable to extract uploader nickname')
else: else:
video_uploader = mobj_official.group(1) video_uploader = mobj_official.group(1)
else: else:
@ -886,7 +898,7 @@ class PhotobucketIE(InfoExtractor):
# Extract id from URL # Extract id from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
@ -899,14 +911,14 @@ class PhotobucketIE(InfoExtractor):
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
# Extract URL, uploader, and title from webpage # Extract URL, uploader, and title from webpage
self.report_extraction(video_id) self.report_extraction(video_id)
mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage) mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.report_error(u'unable to extract media URL')
return return
mediaURL = compat_urllib_parse.unquote(mobj.group(1)) mediaURL = compat_urllib_parse.unquote(mobj.group(1))
@ -914,7 +926,7 @@ class PhotobucketIE(InfoExtractor):
mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage) mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
video_title = mobj.group(1).decode('utf-8') video_title = mobj.group(1).decode('utf-8')
@ -955,7 +967,7 @@ class YahooIE(InfoExtractor):
# Extract ID from URL # Extract ID from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
video_id = mobj.group(2) video_id = mobj.group(2)
@ -968,18 +980,18 @@ class YahooIE(InfoExtractor):
try: try:
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
mobj = re.search(r'\("id", "([0-9]+)"\);', webpage) mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract id field') self._downloader.report_error(u'Unable to extract id field')
return return
yahoo_id = mobj.group(1) yahoo_id = mobj.group(1)
mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage) mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract vid field') self._downloader.report_error(u'Unable to extract vid field')
return return
yahoo_vid = mobj.group(1) yahoo_vid = mobj.group(1)
@ -992,34 +1004,34 @@ class YahooIE(InfoExtractor):
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
# Extract uploader and title from webpage # Extract uploader and title from webpage
self.report_extraction(video_id) self.report_extraction(video_id)
mobj = re.search(r'<meta name="title" content="(.*)" />', webpage) mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.report_error(u'unable to extract video title')
return return
video_title = mobj.group(1).decode('utf-8') video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage) mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video uploader') self._downloader.report_error(u'unable to extract video uploader')
return return
video_uploader = mobj.group(1).decode('utf-8') video_uploader = mobj.group(1).decode('utf-8')
# Extract video thumbnail # Extract video thumbnail
mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage) mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail') self._downloader.report_error(u'unable to extract video thumbnail')
return return
video_thumbnail = mobj.group(1).decode('utf-8') video_thumbnail = mobj.group(1).decode('utf-8')
# Extract video description # Extract video description
mobj = re.search(r'<meta name="description" content="(.*)" />', webpage) mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video description') self._downloader.report_error(u'unable to extract video description')
return return
video_description = mobj.group(1).decode('utf-8') video_description = mobj.group(1).decode('utf-8')
if not video_description: if not video_description:
@ -1028,13 +1040,13 @@ class YahooIE(InfoExtractor):
# Extract video height and width # Extract video height and width
mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage) mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video height') self._downloader.report_error(u'unable to extract video height')
return return
yv_video_height = mobj.group(1) yv_video_height = mobj.group(1)
mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage) mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video width') self._downloader.report_error(u'unable to extract video width')
return return
yv_video_width = mobj.group(1) yv_video_width = mobj.group(1)
@ -1050,13 +1062,13 @@ class YahooIE(InfoExtractor):
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
# Extract media URL from playlist XML # Extract media URL from playlist XML
mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage) mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract media URL') self._downloader.report_error(u'Unable to extract media URL')
return return
video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8') video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
video_url = unescapeHTML(video_url) video_url = unescapeHTML(video_url)
@ -1095,7 +1107,7 @@ class VimeoIE(InfoExtractor):
# Extract ID from URL # Extract ID from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
video_id = mobj.group('id') video_id = mobj.group('id')
@ -1111,7 +1123,7 @@ class VimeoIE(InfoExtractor):
webpage_bytes = compat_urllib_request.urlopen(request).read() webpage_bytes = compat_urllib_request.urlopen(request).read()
webpage = webpage_bytes.decode('utf-8') webpage = webpage_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
# Now we begin extracting as much information as we can from what we # Now we begin extracting as much information as we can from what we
@ -1124,7 +1136,7 @@ class VimeoIE(InfoExtractor):
config = webpage.split(' = {config:')[1].split(',assets:')[0] config = webpage.split(' = {config:')[1].split(',assets:')[0]
config = json.loads(config) config = json.loads(config)
except: except:
self._downloader.trouble(u'ERROR: unable to extract info section') self._downloader.report_error(u'unable to extract info section')
return return
# Extract title # Extract title
@ -1174,7 +1186,7 @@ class VimeoIE(InfoExtractor):
self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality)) self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
break break
else: else:
self._downloader.trouble(u'ERROR: no known codec found') self._downloader.report_error(u'no known codec found')
return return
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
@ -1218,10 +1230,10 @@ class ArteTvIE(InfoExtractor):
self.report_download_webpage(url) self.report_download_webpage(url)
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
except ValueError as err: except ValueError as err:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
return webpage return webpage
@ -1231,7 +1243,7 @@ class ArteTvIE(InfoExtractor):
info = {} info = {}
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
for (i, key, err) in matchTuples: for (i, key, err) in matchTuples:
@ -1416,7 +1428,7 @@ class GenericIE(InfoExtractor):
except ValueError as err: except ValueError as err:
# since this is the last-resort InfoExtractor, if # since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here # this error is thrown, it'll be thrown here
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
self.report_extraction(video_id) self.report_extraction(video_id)
@ -1429,13 +1441,13 @@ class GenericIE(InfoExtractor):
# Broaden the search a little bit: JWPlayer JS loader # Broaden the search a little bit: JWPlayer JS loader
mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage) mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
# It's possible that one of the regexes # It's possible that one of the regexes
# matched, but returned an empty group: # matched, but returned an empty group:
if mobj.group(1) is None: if mobj.group(1) is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
video_url = compat_urllib_parse.unquote(mobj.group(1)) video_url = compat_urllib_parse.unquote(mobj.group(1))
@ -1453,14 +1465,14 @@ class GenericIE(InfoExtractor):
# and so on and so forth; it's just not practical # and so on and so forth; it's just not practical
mobj = re.search(r'<title>(.*)</title>', webpage) mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
video_title = mobj.group(1) video_title = mobj.group(1)
# video uploader is domain name # video uploader is domain name
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url) mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
video_uploader = mobj.group(1) video_uploader = mobj.group(1)
@ -1492,7 +1504,7 @@ class YoutubeSearchIE(InfoExtractor):
def _real_extract(self, query): def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query) mobj = re.match(self._VALID_URL, query)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) self._downloader.report_error(u'invalid search query "%s"' % query)
return return
prefix, query = query.split(':') prefix, query = query.split(':')
@ -1508,7 +1520,7 @@ class YoutubeSearchIE(InfoExtractor):
try: try:
n = int(prefix) n = int(prefix)
if n <= 0: if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
return return
elif n > self._max_youtube_results: elif n > self._max_youtube_results:
self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n)) self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
@ -1533,7 +1545,7 @@ class YoutubeSearchIE(InfoExtractor):
try: try:
data = compat_urllib_request.urlopen(request).read().decode('utf-8') data = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err)) self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
return return
api_response = json.loads(data)['data'] api_response = json.loads(data)['data']
@ -1574,7 +1586,7 @@ class GoogleSearchIE(InfoExtractor):
def _real_extract(self, query): def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query) mobj = re.match(self._VALID_URL, query)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) self._downloader.report_error(u'invalid search query "%s"' % query)
return return
prefix, query = query.split(':') prefix, query = query.split(':')
@ -1590,7 +1602,7 @@ class GoogleSearchIE(InfoExtractor):
try: try:
n = int(prefix) n = int(prefix)
if n <= 0: if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
return return
elif n > self._max_google_results: elif n > self._max_google_results:
self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n)) self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
@ -1614,7 +1626,7 @@ class GoogleSearchIE(InfoExtractor):
try: try:
page = compat_urllib_request.urlopen(request).read() page = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
@ -1658,7 +1670,7 @@ class YahooSearchIE(InfoExtractor):
def _real_extract(self, query): def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query) mobj = re.match(self._VALID_URL, query)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) self._downloader.report_error(u'invalid search query "%s"' % query)
return return
prefix, query = query.split(':') prefix, query = query.split(':')
@ -1674,7 +1686,7 @@ class YahooSearchIE(InfoExtractor):
try: try:
n = int(prefix) n = int(prefix)
if n <= 0: if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
return return
elif n > self._max_yahoo_results: elif n > self._max_yahoo_results:
self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n)) self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
@ -1699,7 +1711,7 @@ class YahooSearchIE(InfoExtractor):
try: try:
page = compat_urllib_request.urlopen(request).read() page = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
@ -1732,9 +1744,7 @@ class YoutubePlaylistIE(InfoExtractor):
(?: (?:
(?:course|view_play_list|my_playlists|artist|playlist|watch) (?:course|view_play_list|my_playlists|artist|playlist|watch)
\? (?:.*?&)*? (?:p|a|list)= \? (?:.*?&)*? (?:p|a|list)=
| user/.*?/user/
| p/ | p/
| user/.*?#[pg]/c/
) )
((?:PL|EC|UU)?[0-9A-Za-z-_]{10,}) ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
.* .*
@ -1761,7 +1771,7 @@ class YoutubePlaylistIE(InfoExtractor):
# Extract playlist id # Extract playlist id
mobj = re.match(self._VALID_URL, url, re.VERBOSE) mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.report_error(u'invalid url: %s' % url)
return return
# Download playlist videos from API # Download playlist videos from API
@ -1776,17 +1786,17 @@ class YoutubePlaylistIE(InfoExtractor):
try: try:
page = compat_urllib_request.urlopen(url).read().decode('utf8') page = compat_urllib_request.urlopen(url).read().decode('utf8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
try: try:
response = json.loads(page) response = json.loads(page)
except ValueError as err: except ValueError as err:
self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err)) self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
return return
if not 'feed' in response or not 'entry' in response['feed']: if not 'feed' in response or not 'entry' in response['feed']:
self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API') self._downloader.report_error(u'Got a malformed response from YouTube API')
return return
videos += [ (entry['yt$position']['$t'], entry['content']['src']) videos += [ (entry['yt$position']['$t'], entry['content']['src'])
for entry in response['feed']['entry'] for entry in response['feed']['entry']
@ -1831,7 +1841,7 @@ class YoutubeChannelIE(InfoExtractor):
# Extract channel id # Extract channel id
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.report_error(u'invalid url: %s' % url)
return return
# Download channel pages # Download channel pages
@ -1846,7 +1856,7 @@ class YoutubeChannelIE(InfoExtractor):
try: try:
page = compat_urllib_request.urlopen(request).read().decode('utf8') page = compat_urllib_request.urlopen(request).read().decode('utf8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
@ -1889,7 +1899,7 @@ class YoutubeUserIE(InfoExtractor):
# Extract username # Extract username
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.report_error(u'invalid url: %s' % url)
return return
username = mobj.group(1) username = mobj.group(1)
@ -1911,7 +1921,7 @@ class YoutubeUserIE(InfoExtractor):
try: try:
page = compat_urllib_request.urlopen(request).read().decode('utf-8') page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
@ -1970,7 +1980,7 @@ class BlipTVUserIE(InfoExtractor):
# Extract username # Extract username
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.report_error(u'invalid url: %s' % url)
return return
username = mobj.group(1) username = mobj.group(1)
@ -1984,7 +1994,7 @@ class BlipTVUserIE(InfoExtractor):
mobj = re.search(r'data-users-id="([^"]+)"', page) mobj = re.search(r'data-users-id="([^"]+)"', page)
page_base = page_base % mobj.group(1) page_base = page_base % mobj.group(1)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
@ -2003,7 +2013,7 @@ class BlipTVUserIE(InfoExtractor):
try: try:
page = compat_urllib_request.urlopen(request).read().decode('utf-8') page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.report_error(u'unable to download webpage: %s' % str(err))
return return
# Extract video identifiers # Extract video identifiers
@ -2068,7 +2078,7 @@ class DepositFilesIE(InfoExtractor):
self.report_download_webpage(file_id) self.report_download_webpage(file_id)
webpage = compat_urllib_request.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
return return
# Search for the real file URL # Search for the real file URL
@ -2078,9 +2088,9 @@ class DepositFilesIE(InfoExtractor):
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL) mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
if (mobj is not None) and (mobj.group(1) is not None): if (mobj is not None) and (mobj.group(1) is not None):
restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip() restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
self._downloader.trouble(u'ERROR: %s' % restriction_message) self._downloader.report_error(u'%s' % restriction_message)
else: else:
self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url) self._downloader.report_error(u'unable to extract download URL from: %s' % url)
return return
file_url = mobj.group(1) file_url = mobj.group(1)
@ -2089,7 +2099,7 @@ class DepositFilesIE(InfoExtractor):
# Search for file title # Search for file title
mobj = re.search(r'<b title="(.*?)">', webpage) mobj = re.search(r'<b title="(.*?)">', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
file_title = mobj.group(1).decode('utf-8') file_title = mobj.group(1).decode('utf-8')
@ -2162,7 +2172,7 @@ class FacebookIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('ID') video_id = mobj.group('ID')
@ -2218,7 +2228,7 @@ class BlipTVIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
urlp = compat_urllib_parse_urlparse(url) urlp = compat_urllib_parse_urlparse(url)
@ -2265,7 +2275,7 @@ class BlipTVIE(InfoExtractor):
json_code_bytes = urlh.read() json_code_bytes = urlh.read()
json_code = json_code_bytes.decode('utf-8') json_code = json_code_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
return return
try: try:
@ -2296,7 +2306,7 @@ class BlipTVIE(InfoExtractor):
'user_agent': 'iTunes/10.6.1', 'user_agent': 'iTunes/10.6.1',
} }
except (ValueError,KeyError) as err: except (ValueError,KeyError) as err:
self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
return return
return [info] return [info]
@ -2318,7 +2328,7 @@ class MyVideoIE(InfoExtractor):
def _real_extract(self,url): def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._download.trouble(u'ERROR: invalid URL: %s' % url) self._download.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
@ -2328,16 +2338,16 @@ class MyVideoIE(InfoExtractor):
webpage = self._download_webpage(webpage_url, video_id) webpage = self._download_webpage(webpage_url, video_id)
self.report_extraction(video_id) self.report_extraction(video_id)
mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />', mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
webpage) webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.report_error(u'unable to extract media URL')
return return
video_url = mobj.group(1) + ('/%s.flv' % video_id) video_url = mobj.group(1) + ('/%s.flv' % video_id)
mobj = re.search('<title>([^<]+)</title>', webpage) mobj = re.search('<title>([^<]+)</title>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
video_title = mobj.group(1) video_title = mobj.group(1)
@ -2410,7 +2420,7 @@ class ComedyCentralIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE) mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
if mobj.group('shortname'): if mobj.group('shortname'):
@ -2441,16 +2451,16 @@ class ComedyCentralIE(InfoExtractor):
html = htmlHandle.read() html = htmlHandle.read()
webpage = html.decode('utf-8') webpage = html.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
if dlNewest: if dlNewest:
url = htmlHandle.geturl() url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE) mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url) self._downloader.report_error(u'Invalid redirected URL: ' + url)
return return
if mobj.group('episode') == '': if mobj.group('episode') == '':
self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url) self._downloader.report_error(u'Redirected URL is still not specific: ' + url)
return return
epTitle = mobj.group('episode') epTitle = mobj.group('episode')
@ -2463,7 +2473,7 @@ class ComedyCentralIE(InfoExtractor):
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage) altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
if len(altMovieParams) == 0: if len(altMovieParams) == 0:
self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url) self._downloader.report_error(u'unable to find Flash URL in webpage ' + url)
return return
else: else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
@ -2474,7 +2484,7 @@ class ComedyCentralIE(InfoExtractor):
try: try:
indexXml = compat_urllib_request.urlopen(indexUrl).read() indexXml = compat_urllib_request.urlopen(indexUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err)) self._downloader.report_error(u'unable to download episode index: ' + compat_str(err))
return return
results = [] results = []
@ -2495,7 +2505,7 @@ class ComedyCentralIE(InfoExtractor):
try: try:
configXml = compat_urllib_request.urlopen(configReq).read() configXml = compat_urllib_request.urlopen(configReq).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return return
cdoc = xml.etree.ElementTree.fromstring(configXml) cdoc = xml.etree.ElementTree.fromstring(configXml)
@ -2505,7 +2515,7 @@ class ComedyCentralIE(InfoExtractor):
turls.append(finfo) turls.append(finfo)
if len(turls) == 0: if len(turls) == 0:
self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found') self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
continue continue
if self._downloader.params.get('listformats', None): if self._downloader.params.get('listformats', None):
@ -2562,7 +2572,7 @@ class EscapistIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
showName = mobj.group('showname') showName = mobj.group('showname')
videoId = mobj.group('episode') videoId = mobj.group('episode')
@ -2574,7 +2584,7 @@ class EscapistIE(InfoExtractor):
m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type']) m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
webPage = webPageBytes.decode(m.group(1) if m else 'utf-8') webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err)) self._downloader.report_error(u'unable to download webpage: ' + compat_str(err))
return return
descMatch = re.search('<meta name="description" content="([^"]*)"', webPage) descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
@ -2592,7 +2602,7 @@ class EscapistIE(InfoExtractor):
m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type']) m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8') configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err)) self._downloader.report_error(u'unable to download configuration: ' + compat_str(err))
return return
# Technically, it's JavaScript, not JSON # Technically, it's JavaScript, not JSON
@ -2601,7 +2611,7 @@ class EscapistIE(InfoExtractor):
try: try:
config = json.loads(configJSON) config = json.loads(configJSON)
except (ValueError,) as err: except (ValueError,) as err:
self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err)) self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
return return
playlist = config['playlist'] playlist = config['playlist']
@ -2639,7 +2649,7 @@ class CollegeHumorIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('videoid') video_id = mobj.group('videoid')
@ -2654,7 +2664,7 @@ class CollegeHumorIE(InfoExtractor):
try: try:
metaXml = compat_urllib_request.urlopen(xmlUrl).read() metaXml = compat_urllib_request.urlopen(xmlUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
return return
mdoc = xml.etree.ElementTree.fromstring(metaXml) mdoc = xml.etree.ElementTree.fromstring(metaXml)
@ -2665,7 +2675,7 @@ class CollegeHumorIE(InfoExtractor):
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
manifest_url = videoNode.findall('./file')[0].text manifest_url = videoNode.findall('./file')[0].text
except IndexError: except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file') self._downloader.report_error(u'Invalid metadata XML file')
return return
manifest_url += '?hdcore=2.10.3' manifest_url += '?hdcore=2.10.3'
@ -2673,7 +2683,7 @@ class CollegeHumorIE(InfoExtractor):
try: try:
manifestXml = compat_urllib_request.urlopen(manifest_url).read() manifestXml = compat_urllib_request.urlopen(manifest_url).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
return return
adoc = xml.etree.ElementTree.fromstring(manifestXml) adoc = xml.etree.ElementTree.fromstring(manifestXml)
@ -2682,7 +2692,7 @@ class CollegeHumorIE(InfoExtractor):
node_id = media_node.attrib['url'] node_id = media_node.attrib['url']
video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
except IndexError as err: except IndexError as err:
self._downloader.trouble(u'\nERROR: Invalid manifest file') self._downloader.report_error(u'Invalid manifest file')
return return
url_pr = compat_urllib_parse_urlparse(manifest_url) url_pr = compat_urllib_parse_urlparse(manifest_url)
@ -2706,7 +2716,7 @@ class XVideosIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
@ -2718,7 +2728,7 @@ class XVideosIE(InfoExtractor):
# Extract video URL # Extract video URL
mobj = re.search(r'flv_url=(.+?)&', webpage) mobj = re.search(r'flv_url=(.+?)&', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video url') self._downloader.report_error(u'unable to extract video url')
return return
video_url = compat_urllib_parse.unquote(mobj.group(1)) video_url = compat_urllib_parse.unquote(mobj.group(1))
@ -2726,7 +2736,7 @@ class XVideosIE(InfoExtractor):
# Extract title # Extract title
mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage) mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.report_error(u'unable to extract video title')
return return
video_title = mobj.group(1) video_title = mobj.group(1)
@ -2734,7 +2744,7 @@ class XVideosIE(InfoExtractor):
# Extract video thumbnail # Extract video thumbnail
mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage) mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail') self._downloader.report_error(u'unable to extract video thumbnail')
return return
video_thumbnail = mobj.group(0) video_thumbnail = mobj.group(0)
@ -2778,7 +2788,7 @@ class SoundcloudIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
# extract uploader (which is in the url) # extract uploader (which is in the url)
@ -2796,7 +2806,7 @@ class SoundcloudIE(InfoExtractor):
info_json_bytes = compat_urllib_request.urlopen(request).read() info_json_bytes = compat_urllib_request.urlopen(request).read()
info_json = info_json_bytes.decode('utf-8') info_json = info_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
return return
info = json.loads(info_json) info = json.loads(info_json)
@ -2809,7 +2819,7 @@ class SoundcloudIE(InfoExtractor):
stream_json_bytes = compat_urllib_request.urlopen(request).read() stream_json_bytes = compat_urllib_request.urlopen(request).read()
stream_json = stream_json_bytes.decode('utf-8') stream_json = stream_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err)) self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
return return
streams = json.loads(stream_json) streams = json.loads(stream_json)
@ -2825,6 +2835,87 @@ class SoundcloudIE(InfoExtractor):
'description': info['description'], 'description': info['description'],
}] }]
class SoundcloudSetIE(InfoExtractor):
"""Information extractor for soundcloud.com sets
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
the media can be grabbed by requesting from an url composed
of the stream token and uid
"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
IE_NAME = u'soundcloud'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_resolve(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
# extract uploader (which is in the url)
uploader = mobj.group(1)
# extract simple title (uploader + slug of song title)
slug_title = mobj.group(2)
simple_title = uploader + u'-' + slug_title
self.report_resolve('%s/sets/%s' % (uploader, slug_title))
url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
request = compat_urllib_request.Request(resolv_url)
try:
info_json_bytes = compat_urllib_request.urlopen(request).read()
info_json = info_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
return
videos = []
info = json.loads(info_json)
if 'errors' in info:
for err in info['errors']:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err['error_message']))
return
for track in info['tracks']:
video_id = track['id']
self.report_extraction('%s/sets/%s' % (uploader, slug_title))
streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
request = compat_urllib_request.Request(streams_url)
try:
stream_json_bytes = compat_urllib_request.urlopen(request).read()
stream_json = stream_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
return
streams = json.loads(stream_json)
mediaURL = streams['http_mp3_128_url']
videos.append({
'id': video_id,
'url': mediaURL,
'uploader': track['user']['username'],
'upload_date': track['created_at'],
'title': track['title'],
'ext': u'mp3',
'description': track['description'],
})
return videos
class InfoQIE(InfoExtractor): class InfoQIE(InfoExtractor):
"""Information extractor for infoq.com""" """Information extractor for infoq.com"""
@ -2837,7 +2928,7 @@ class InfoQIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
webpage = self._download_webpage(url, video_id=url) webpage = self._download_webpage(url, video_id=url)
@ -2846,7 +2937,7 @@ class InfoQIE(InfoExtractor):
# Extract video URL # Extract video URL
mobj = re.search(r"jsclassref='([^']*)'", webpage) mobj = re.search(r"jsclassref='([^']*)'", webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video url') self._downloader.report_error(u'unable to extract video url')
return return
real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8')) real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
@ -2854,7 +2945,7 @@ class InfoQIE(InfoExtractor):
# Extract title # Extract title
mobj = re.search(r'contentTitle = "(.*?)";', webpage) mobj = re.search(r'contentTitle = "(.*?)";', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.report_error(u'unable to extract video title')
return return
video_title = mobj.group(1) video_title = mobj.group(1)
@ -2937,7 +3028,7 @@ class MixcloudIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
# extract uploader & filename from url # extract uploader & filename from url
uploader = mobj.group(1).decode('utf-8') uploader = mobj.group(1).decode('utf-8')
@ -2951,7 +3042,7 @@ class MixcloudIE(InfoExtractor):
self.report_download_json(file_url) self.report_download_json(file_url)
jsonData = compat_urllib_request.urlopen(request).read() jsonData = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
return return
# parse JSON # parse JSON
@ -2975,7 +3066,7 @@ class MixcloudIE(InfoExtractor):
break # got it! break # got it!
else: else:
if req_format not in formats: if req_format not in formats:
self._downloader.trouble(u'ERROR: format is not available') self._downloader.report_error(u'format is not available')
return return
url_list = self.get_urls(formats, req_format) url_list = self.get_urls(formats, req_format)
@ -3029,14 +3120,14 @@ class StanfordOpenClassroomIE(InfoExtractor):
try: try:
metaXml = compat_urllib_request.urlopen(xmlUrl).read() metaXml = compat_urllib_request.urlopen(xmlUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
return return
mdoc = xml.etree.ElementTree.fromstring(metaXml) mdoc = xml.etree.ElementTree.fromstring(metaXml)
try: try:
info['title'] = mdoc.findall('./title')[0].text info['title'] = mdoc.findall('./title')[0].text
info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
except IndexError: except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file') self._downloader.report_error(u'Invalid metadata XML file')
return return
info['ext'] = info['url'].rpartition('.')[2] info['ext'] = info['url'].rpartition('.')[2]
return [info] return [info]
@ -3088,7 +3179,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
try: try:
rootpage = compat_urllib_request.urlopen(rootURL).read() rootpage = compat_urllib_request.urlopen(rootURL).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err)) self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
return return
info['title'] = info['id'] info['title'] = info['id']
@ -3120,7 +3211,7 @@ class MTVIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
if not mobj.group('proto'): if not mobj.group('proto'):
url = 'http://' + url url = 'http://' + url
@ -3130,25 +3221,25 @@ class MTVIE(InfoExtractor):
mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage) mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract song name') self._downloader.report_error(u'unable to extract song name')
return return
song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1')) song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage) mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract performer') self._downloader.report_error(u'unable to extract performer')
return return
performer = unescapeHTML(mobj.group(1).decode('iso-8859-1')) performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
video_title = performer + ' - ' + song_name video_title = performer + ' - ' + song_name
mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage) mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to mtvn_uri') self._downloader.report_error(u'unable to mtvn_uri')
return return
mtvn_uri = mobj.group(1) mtvn_uri = mobj.group(1)
mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage) mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract content id') self._downloader.report_error(u'unable to extract content id')
return return
content_id = mobj.group(1) content_id = mobj.group(1)
@ -3158,7 +3249,7 @@ class MTVIE(InfoExtractor):
try: try:
metadataXml = compat_urllib_request.urlopen(request).read() metadataXml = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
return return
mdoc = xml.etree.ElementTree.fromstring(metadataXml) mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@ -3230,7 +3321,7 @@ class YoukuIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('ID') video_id = mobj.group('ID')
@ -3241,7 +3332,7 @@ class YoukuIE(InfoExtractor):
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
jsondata = compat_urllib_request.urlopen(request).read() jsondata = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
self.report_extraction(video_id) self.report_extraction(video_id)
@ -3272,7 +3363,7 @@ class YoukuIE(InfoExtractor):
fileid = config['data'][0]['streamfileids'][format] fileid = config['data'][0]['streamfileids'][format]
keys = [s['k'] for s in config['data'][0]['segs'][format]] keys = [s['k'] for s in config['data'][0]['segs'][format]]
except (UnicodeDecodeError, ValueError, KeyError): except (UnicodeDecodeError, ValueError, KeyError):
self._downloader.trouble(u'ERROR: unable to extract info section') self._downloader.report_error(u'unable to extract info section')
return return
files_info=[] files_info=[]
@ -3319,7 +3410,7 @@ class XNXXIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
@ -3330,24 +3421,24 @@ class XNXXIE(InfoExtractor):
webpage_bytes = compat_urllib_request.urlopen(url).read() webpage_bytes = compat_urllib_request.urlopen(url).read()
webpage = webpage_bytes.decode('utf-8') webpage = webpage_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err) self._downloader.report_error(u'unable to download video webpage: %s' % err)
return return
result = re.search(self.VIDEO_URL_RE, webpage) result = re.search(self.VIDEO_URL_RE, webpage)
if result is None: if result is None:
self._downloader.trouble(u'ERROR: unable to extract video url') self._downloader.report_error(u'unable to extract video url')
return return
video_url = compat_urllib_parse.unquote(result.group(1)) video_url = compat_urllib_parse.unquote(result.group(1))
result = re.search(self.VIDEO_TITLE_RE, webpage) result = re.search(self.VIDEO_TITLE_RE, webpage)
if result is None: if result is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.report_error(u'unable to extract video title')
return return
video_title = result.group(1) video_title = result.group(1)
result = re.search(self.VIDEO_THUMB_RE, webpage) result = re.search(self.VIDEO_THUMB_RE, webpage)
if result is None: if result is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail') self._downloader.report_error(u'unable to extract video thumbnail')
return return
video_thumbnail = result.group(1) video_thumbnail = result.group(1)
@ -3396,7 +3487,7 @@ class GooglePlusIE(InfoExtractor):
# Extract id from URL # Extract id from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.report_error(u'Invalid URL: %s' % url)
return return
post_url = mobj.group(0) post_url = mobj.group(0)
@ -3410,7 +3501,7 @@ class GooglePlusIE(InfoExtractor):
try: try:
webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err))
return return
# Extract update date # Extract update date
@ -3445,14 +3536,14 @@ class GooglePlusIE(InfoExtractor):
pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]' pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
mobj = re.search(pattern, webpage) mobj = re.search(pattern, webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video page URL') self._downloader.report_error(u'unable to extract video page URL')
video_page = mobj.group(1) video_page = mobj.group(1)
request = compat_urllib_request.Request(video_page) request = compat_urllib_request.Request(video_page)
try: try:
webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return return
self.report_extract_vid_page(video_page) self.report_extract_vid_page(video_page)
@ -3462,7 +3553,7 @@ class GooglePlusIE(InfoExtractor):
pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"' pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
mobj = re.findall(pattern, webpage) mobj = re.findall(pattern, webpage)
if len(mobj) == 0: if len(mobj) == 0:
self._downloader.trouble(u'ERROR: unable to extract video links') self._downloader.report_error(u'unable to extract video links')
# Sort in resolution # Sort in resolution
links = sorted(mobj) links = sorted(mobj)
@ -3494,7 +3585,7 @@ class NBAIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
@ -3550,13 +3641,13 @@ class JustinTVIE(InfoExtractor):
webpage_bytes = urlh.read() webpage_bytes = urlh.read()
webpage = webpage_bytes.decode('utf-8', 'ignore') webpage = webpage_bytes.decode('utf-8', 'ignore')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err)) self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err))
return return
response = json.loads(webpage) response = json.loads(webpage)
if type(response) != list: if type(response) != list:
error_text = response.get('error', 'unknown error') error_text = response.get('error', 'unknown error')
self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text) self._downloader.report_error(u'Justin.tv API: %s' % error_text)
return return
info = [] info = []
for clip in response: for clip in response:
@ -3581,7 +3672,7 @@ class JustinTVIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
api = 'http://api.justin.tv' api = 'http://api.justin.tv'
@ -3616,7 +3707,7 @@ class FunnyOrDieIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('id') video_id = mobj.group('id')
@ -3624,13 +3715,13 @@ class FunnyOrDieIE(InfoExtractor):
m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL) m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
if not m: if not m:
self._downloader.trouble(u'ERROR: unable to find video information') self._downloader.report_error(u'unable to find video information')
video_url = unescapeHTML(m.group('url')) video_url = unescapeHTML(m.group('url'))
m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage) m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
if not m: if not m:
self._downloader.trouble(u'Cannot find video title') self._downloader.trouble(u'Cannot find video title')
title = unescapeHTML(m.group('title')) title = clean_html(m.group('title'))
m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage) m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
if m: if m:
@ -3677,7 +3768,7 @@ class SteamIE(InfoExtractor):
video_url = vid.group('videoURL') video_url = vid.group('videoURL')
video_thumb = thumb.group('thumbnail') video_thumb = thumb.group('thumbnail')
if not video_url: if not video_url:
self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id) self._downloader.report_error(u'Cannot find video url for %s' % video_id)
info = { info = {
'id':video_id, 'id':video_id,
'url':video_url, 'url':video_url,
@ -3710,6 +3801,62 @@ class UstreamIE(InfoExtractor):
} }
return [info] return [info]
class WorldStarHipHopIE(InfoExtractor):
    """Information extractor for worldstarhiphop.com / worldstarcandy.com video pages.

    Scrapes the page HTML directly (no API): the direct media URL, the page
    <title>, and the image_src thumbnail are all pulled out with regexes.
    """
    _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
    IE_NAME = u'WorldStarHipHop'

    def _real_extract(self, url):
        # Direct media links on the page are hosted on hw-videos and end in
        # mp4 or flv; the extension tells us the container format.
        _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""

        webpage_src = compat_urllib_request.urlopen(url).read()
        webpage_src = webpage_src.decode('utf-8')

        mobj = re.search(_src_url, webpage_src)
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        if mobj is not None:
            video_url = mobj.group()
            ext = 'mp4' if 'mp4' in video_url else 'flv'
        else:
            # Consistency fix: the rest of the file uses report_error instead
            # of the deprecated trouble(u'ERROR: ...') form.
            self._downloader.report_error(u'Cannot find video url for %s' % video_id)
            return

        # Page title; fall back to a generated name if the page has no <title>.
        mobj = re.search(r"""<title>(.*)</title>""", webpage_src)
        if mobj is not None:
            title = mobj.group(1)
        else:
            # Typo fix: was 'World Start Hip Hop'.
            title = 'World Star Hip Hop - %s' % time.ctime()

        mobj = re.search(r"""rel="image_src" href="(.*)" />""", webpage_src)
        # If there is no image_src thumbnail this is (presumably) a WSHH candy
        # page, which carries its real title in a candytitles span instead.
        if mobj is not None:
            thumbnail = mobj.group(1)
        else:
            candy_mobj = re.search(r"""candytitles.*>(.*)</span>""", webpage_src)
            if candy_mobj is not None:
                title = candy_mobj.group(1)
            thumbnail = None

        results = [{
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'ext': ext,
        }]
        return results
class RBMARadioIE(InfoExtractor): class RBMARadioIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$' _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
@ -3767,7 +3914,7 @@ class YouPornIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('videoid') video_id = mobj.group('videoid')
@ -3859,7 +4006,7 @@ class YouPornIE(InfoExtractor):
else: else:
format = self._specific( req_format, formats ) format = self._specific( req_format, formats )
if result is None: if result is None:
self._downloader.trouble(u'ERROR: requested format not available') self._downloader.report_error(u'requested format not available')
return return
return [format] return [format]
@ -3872,7 +4019,7 @@ class PornotubeIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('videoid') video_id = mobj.group('videoid')
@ -3885,7 +4032,7 @@ class PornotubeIE(InfoExtractor):
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",' VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
result = re.search(VIDEO_URL_RE, webpage) result = re.search(VIDEO_URL_RE, webpage)
if result is None: if result is None:
self._downloader.trouble(u'ERROR: unable to extract video url') self._downloader.report_error(u'unable to extract video url')
return return
video_url = compat_urllib_parse.unquote(result.group('url')) video_url = compat_urllib_parse.unquote(result.group('url'))
@ -3893,7 +4040,7 @@ class PornotubeIE(InfoExtractor):
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by' VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
result = re.search(VIDEO_UPLOADED_RE, webpage) result = re.search(VIDEO_UPLOADED_RE, webpage)
if result is None: if result is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.report_error(u'unable to extract video title')
return return
upload_date = result.group('date') upload_date = result.group('date')
@ -3914,7 +4061,7 @@ class YouJizzIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.report_error(u'invalid URL: %s' % url)
return return
video_id = mobj.group('videoid') video_id = mobj.group('videoid')
@ -4074,7 +4221,7 @@ class TEDIE(InfoExtractor):
videoName=m.group('name') videoName=m.group('name')
webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName) webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
# If the url includes the language we get the title translated # If the url includes the language we get the title translated
title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>' title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>'
title=re.search(title_RE, webpage).group('title') title=re.search(title_RE, webpage).group('title')
info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?) info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
"id":(?P<videoID>[\d]+).*? "id":(?P<videoID>[\d]+).*?
@ -4115,13 +4262,13 @@ class MySpassIE(InfoExtractor):
# extract values from metadata # extract values from metadata
url_flv_el = metadata.find('url_flv') url_flv_el = metadata.find('url_flv')
if url_flv_el is None: if url_flv_el is None:
self._downloader.trouble(u'ERROR: unable to extract download url') self._downloader.report_error(u'unable to extract download url')
return return
video_url = url_flv_el.text video_url = url_flv_el.text
extension = os.path.splitext(video_url)[1][1:] extension = os.path.splitext(video_url)[1][1:]
title_el = metadata.find('title') title_el = metadata.find('title')
if title_el is None: if title_el is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.report_error(u'unable to extract title')
return return
title = title_el.text title = title_el.text
format_id_el = metadata.find('format_id') format_id_el = metadata.find('format_id')
@ -4151,7 +4298,7 @@ class MySpassIE(InfoExtractor):
return [info] return [info]
class SpiegelIE(InfoExtractor): class SpiegelIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?$' _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
def _real_extract(self, url): def _real_extract(self, url):
m = re.match(self._VALID_URL, url) m = re.match(self._VALID_URL, url)
@ -4183,6 +4330,55 @@ class SpiegelIE(InfoExtractor):
} }
return [info] return [info]
class LiveLeakIE(InfoExtractor):
    """Information extractor for liveleak.com /view pages.

    The media URL, title, description and uploader are scraped from the page
    HTML (JW Player config and Open Graph meta tags).
    """
    _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
    IE_NAME = u'liveleak'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            # Consistency fix: use report_error like the rest of the file,
            # not the deprecated trouble(u'ERROR: ...') form.
            self._downloader.report_error(u'invalid URL: %s' % url)
            return

        video_id = mobj.group('video_id')

        webpage = self._download_webpage(url, video_id)

        m = re.search(r'file: "(.*?)",', webpage)
        if not m:
            self._downloader.report_error(u'unable to find video url')
            return
        video_url = m.group(1)

        m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
        if not m:
            # Bug fix: the original reported the error but fell through and
            # crashed on m.group('title') with m being None.
            self._downloader.report_error(u'Cannot find video title')
            return
        title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()

        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
        desc = unescapeHTML(m.group('desc')) if m else None

        m = re.search(r'By:.*?(\w+)</a>', webpage)
        uploader = clean_html(m.group(1)) if m else None

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': desc,
            'uploader': uploader,
        }

        return [info]
def gen_extractors(): def gen_extractors():
""" Return a list of an instance of every supported extractor. """ Return a list of an instance of every supported extractor.
@ -4210,6 +4406,7 @@ def gen_extractors():
EscapistIE(), EscapistIE(),
CollegeHumorIE(), CollegeHumorIE(),
XVideosIE(), XVideosIE(),
SoundcloudSetIE(),
SoundcloudIE(), SoundcloudIE(),
InfoQIE(), InfoQIE(),
MixcloudIE(), MixcloudIE(),
@ -4223,6 +4420,7 @@ def gen_extractors():
GooglePlusIE(), GooglePlusIE(),
ArteTvIE(), ArteTvIE(),
NBAIE(), NBAIE(),
WorldStarHipHopIE(),
JustinTVIE(), JustinTVIE(),
FunnyOrDieIE(), FunnyOrDieIE(),
SteamIE(), SteamIE(),
@ -4233,7 +4431,6 @@ def gen_extractors():
TEDIE(), TEDIE(),
MySpassIE(), MySpassIE(),
SpiegelIE(), SpiegelIE(),
LiveLeakIE(),
GenericIE() GenericIE()
] ]

View File

@ -311,7 +311,7 @@ def clean_html(html):
html = re.sub('<.*?>', '', html) html = re.sub('<.*?>', '', html)
# Replace html entities # Replace html entities
html = unescapeHTML(html) html = unescapeHTML(html)
return html return html.strip()
def sanitize_open(filename, open_mode): def sanitize_open(filename, open_mode):
@ -329,7 +329,7 @@ def sanitize_open(filename, open_mode):
if sys.platform == 'win32': if sys.platform == 'win32':
import msvcrt import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout, filename) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode) stream = open(encodeFilename(filename), open_mode)
return (stream, filename) return (stream, filename)
except (IOError, OSError) as err: except (IOError, OSError) as err:
@ -435,6 +435,7 @@ class ExtractorError(Exception):
""" tb, if given, is the original traceback (so that it can be printed out). """ """ tb, if given, is the original traceback (so that it can be printed out). """
super(ExtractorError, self).__init__(msg) super(ExtractorError, self).__init__(msg)
self.traceback = tb self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
def format_traceback(self): def format_traceback(self):
if self.traceback is None: if self.traceback is None:
@ -449,7 +450,10 @@ class DownloadError(Exception):
configured to continue on errors. They will contain the appropriate configured to continue on errors. They will contain the appropriate
error message. error message.
""" """
pass def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(Exception): class SameFileError(Exception):

View File

@ -1,2 +1,2 @@
__version__ = '2013.02.25' __version__ = '2013.04.03'