mirror of https://code.hackerspace.pl/q3k/youtube-dl synced 2025-03-16 11:43:02 +00:00
Conflicts:
	.gitignore
	LATEST_VERSION
	Makefile
	youtube-dl
	youtube-dl.exe
	youtube_dl/InfoExtractors.py
	youtube_dl/__init__.py
Jeff Crouse 2013-01-05 15:03:54 -05:00
commit 258d5850c9
43 changed files with 6464 additions and 4994 deletions

.gitignore vendored

@@ -1,17 +1,19 @@
 *.pyc
 *.pyo
 *~
+*.DS_Store
 wine-py2exe/
 py2exe.log
-youtube-dl
+*.kate-swp
+build/
+dist/
+MANIFEST
+README.txt
 youtube-dl.1
-LATEST_VERSION
-
-#OS X
-.DS_Store
-.AppleDouble
-.LSOverride
-Icon
-._*
-.Spotlight-V100
-.Trashes
+youtube-dl.bash-completion
+youtube-dl
+youtube-dl.exe
+youtube-dl.tar.gz
+.coverage
+cover/
+updates_key.pem

.tarignore Normal file

@@ -0,0 +1,17 @@
updates_key.pem
*.pyc
*.pyo
youtube-dl.exe
wine-py2exe/
py2exe.log
*.kate-swp
build/
dist/
MANIFEST
*.DS_Store
youtube-dl.tar.gz
.coverage
cover/
__pycache__/
.git/
*~

.travis.yml

@@ -1,9 +1,14 @@
 language: python
-#specify the python version
 python:
   - "2.6"
   - "2.7"
-#command to install the setup
-install:
-# command to run tests
-script: nosetests test --nocapture
+  - "3.3"
+script: nosetests test --verbose
+notifications:
+  email:
+    - filippo.valsorda@gmail.com
+    - phihag@phihag.de
+  irc:
+    channels:
+      - "irc.freenode.org#youtube-dl"
+    skip_join: true

CHANGELOG Normal file

@@ -0,0 +1,14 @@
2013.01.02 Codename: GIULIA
* Add support for ComedyCentral clips <nto>
* Corrected Vimeo description fetching <Nick Daniels>
* Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
* --verbose offers more environment info
* New info_dict field: uploader_id
* New updates system, with signature checking
* New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
* Fixed IEs: BlipTv
* Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
* Simplified IEs and test code
* Various (Python 3 and other) fixes
* Revamped and expanded tests

LATEST_VERSION Normal file

@@ -0,0 +1 @@
2012.10.09

LICENSE Normal file

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

MANIFEST.in Normal file

@@ -0,0 +1,3 @@
include README.md
include test/*.py
include test/*.json

Makefile

@@ -1,8 +1,7 @@
-all: youtube-dl README.md youtube-dl.1 youtube-dl.bash-completion LATEST_VERSION
+all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+# TODO: re-add youtube-dl.exe, and make sure it's 1. safe and 2. doesn't need sudo

 clean:
-	rm -f youtube-dl youtube-dl.exe youtube-dl.1 LATEST_VERSION youtube_dl/*.pyc
+	rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/

 PREFIX=/usr/local
 BINDIR=$(PREFIX)/bin

@@ -17,43 +16,32 @@ install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
 	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
 	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl

-.PHONY: all clean install youtube-dl.bash-completion
-# TODO un-phony README.md and youtube-dl.bash_completion by reading from .in files and generating from them
+test:
+	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
+	nosetests --verbose test
+
+.PHONY: all clean install test

 youtube-dl: youtube_dl/*.py
-	zip --quiet --junk-paths youtube-dl youtube_dl/*.py
+	zip --quiet youtube-dl youtube_dl/*.py
+	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
 	echo '#!/usr/bin/env python' > youtube-dl
 	cat youtube-dl.zip >> youtube-dl
 	rm youtube-dl.zip
 	chmod a+x youtube-dl

-youtube-dl.exe: youtube_dl/*.py
-	bash devscripts/wine-py2exe.sh build_exe.py
-
 README.md: youtube_dl/*.py
-	@options=$$(COLUMNS=80 python -m youtube_dl --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/## \1/') && \
-		header=$$(sed -e '/.*# OPTIONS/,$$ d' README.md) && \
-		footer=$$(sed -e '1,/.*# FAQ/ d' README.md) && \
-		echo "$${header}" > README.md && \
-		echo >> README.md && \
-		echo '# OPTIONS' >> README.md && \
-		echo "$${options}" >> README.md && \
-		echo >> README.md && \
-		echo '# FAQ' >> README.md && \
-		echo "$${footer}" >> README.md
+	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py

-youtube-dl.1:
-	pandoc -s -w man README.md -o youtube-dl.1
+README.txt: README.md
+	pandoc -f markdown -t plain README.md -o README.txt

-youtube-dl.bash-completion:
-	@options=`egrep -o '(--[a-z-]+) ' README.md | sort -u | xargs echo` && \
-		content=`sed "s/opts=\"[^\"]*\"/opts=\"$${options}\"/g" youtube-dl.bash-completion` && \
-		echo "$${content}" > youtube-dl.bash-completion
+youtube-dl.1: README.md
+	pandoc -s -f markdown -t man README.md -o youtube-dl.1

-LATEST_VERSION: youtube_dl/__init__.py
-	python -m youtube_dl --version > LATEST_VERSION
+youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
+	python devscripts/bash-completion.py

-test:
-	nosetests2 --nocapture test
-
-.PHONY: default compile update update-latest update-readme test clean
+youtube-dl.tar.gz: all
+	tar -cvzf youtube-dl.tar.gz -s "|^./|./youtube-dl/|" \
+		--exclude-from=".tarignore" -- .
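The `youtube-dl` target relies on Python executing any zip archive that contains a top-level `__main__.py`; prepending bytes such as a shebang line does not break this, because zip metadata lives at the end of the file. A minimal self-contained sketch of the same trick (illustration only; the file names here are invented):

    # Build a tiny executable zip the same way the Makefile target does.
    import os, stat, subprocess, sys, zipfile

    with zipfile.ZipFile('demo.zip', 'w') as zf:
        zf.writestr('__main__.py', 'print("hello from inside the zip")\n')

    with open('demo', 'wb') as out:
        out.write(b'#!/usr/bin/env python\n')   # the Makefile's echo step
        with open('demo.zip', 'rb') as zin:
            out.write(zin.read())               # the Makefile's cat step
    # rough equivalent of chmod a+x (user bit only, for brevity)
    os.chmod('demo', os.stat('demo').st_mode | stat.S_IXUSR)

    subprocess.check_call([sys.executable, 'demo'])  # prints the greeting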

README.md

@@ -1,4 +1,4 @@
-% youtube-dl(1)
+% YOUTUBE-DL(1)

 # NAME
 youtube-dl
@@ -20,6 +20,11 @@ which means you can modify it, redistribute it or use it however you like.
     -i, --ignore-errors        continue on download errors
     -r, --rate-limit LIMIT     download rate limit (e.g. 50k or 44.6m)
     -R, --retries RETRIES      number of retries (default is 10)
+    --buffer-size SIZE         size of download buffer (e.g. 1024 or 16k) (default
+                               is 1024)
+    --no-resize-buffer         do not automatically adjust the buffer size. By
+                               default, the buffer size is automatically resized
+                               from an initial value of SIZE.
     --dump-user-agent          display the current browser identification
     --user-agent UA            specify a custom user agent
     --list-extractors          List all supported extractors and the URLs they
@@ -37,16 +42,22 @@ which means you can modify it, redistribute it or use it however you like.
 Filesystem Options:
     -t, --title                use title in file name
     --id                       use video ID in file name
-    -l, --literal              use literal title in file name
+    -l, --literal              [deprecated] alias of --title
     -A, --auto-number          number downloaded files starting from 00000
-    -o, --output TEMPLATE      output filename template. Use %(stitle)s to get the
-                               title, %(uploader)s for the uploader name,
-                               %(autonumber)s to get an automatically incremented
-                               number, %(ext)s for the filename extension,
-                               %(upload_date)s for the upload date (YYYYMMDD),
-                               %(extractor)s for the provider (youtube, metacafe,
-                               etc), %(id)s for the video id and %% for a literal
-                               percent. Use - to output to stdout.
+    -o, --output TEMPLATE      output filename template. Use %(title)s to get the
+                               title, %(uploader)s for the uploader name,
+                               %(uploader_id)s for the uploader nickname if
+                               different, %(autonumber)s to get an automatically
+                               incremented number, %(ext)s for the filename
+                               extension, %(upload_date)s for the upload date
+                               (YYYYMMDD), %(extractor)s for the provider
+                               (youtube, metacafe, etc), %(id)s for the video id
+                               and %% for a literal percent. Use - to output to
+                               stdout. Can also be used to download to a different
+                               directory, for example with -o '/my/downloads/%(upl
+                               oader)s/%(title)s-%(id)s.%(ext)s' .
+    --restrict-filenames       Restrict filenames to only ASCII characters, and
+                               avoid "&" and spaces in filenames
     -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
     -w, --no-overwrites        do not overwrite files
     -c, --continue             resume partially downloaded files
@@ -101,6 +112,34 @@ which means you can modify it, redistribute it or use it however you like.
                                specific bitrate like 128K (default 5)
     -k, --keep-video           keeps the video file on disk after the post-
                                processing; the video is erased by default
+    --no-post-overwrites       do not overwrite post-processed files; the post-
+                               processed files are overwritten by default
+
+# CONFIGURATION
+
+You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.local/config/youtube-dl.conf`.
+
+# OUTPUT TEMPLATE
+
+The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
+
+ - `id`: The sequence will be replaced by the video identifier.
+ - `url`: The sequence will be replaced by the video URL.
+ - `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
+ - `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
+ - `title`: The sequence will be replaced by the video title.
+ - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
+ - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
+ - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
+
+The current default template is `%(id)s.%(ext)s`, but that will be switched to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).
+
+In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
+
+    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+    youtube-dl test video ''_ä↭𝕐.mp4    # All kinds of weird characters
+    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+    youtube-dl_test_video_.mp4          # A simple file name
+
 # FAQ
@@ -137,17 +176,9 @@ The error
 means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.

-To run youtube-dl under Python 2.5, you'll have to manually check it out like this:
-
-    git clone git://github.com/rg3/youtube-dl.git
-    cd youtube-dl
-    python -m youtube_dl --help
-
-Please note that Python 2.5 is not supported anymore.
-
 ### What is this binary file? Where has the code gone?

-Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repo to see the code. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make compile`.
+Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.

 ### The exe throws a *Runtime error from Visual C++*
@@ -166,6 +197,9 @@ Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>
 Please include:

 * Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem.
+* If possible re-run the command with `--verbose`, and include the full output, it is really helpful to us.
 * The output of `youtube-dl --version`
 * The output of `python --version`
 * The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough).
+
+For discussions, join us in the irc channel #youtube-dl on freenode.
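To make the new CONFIGURATION section concrete: a configuration file simply holds default arguments exactly as they would appear on the command line. A hypothetical `~/.local/config/youtube-dl.conf` (the flags are real options from this README; the file contents are invented for illustration):

    --extract-audio
    --no-mtime
    --restrict-filenames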

bin/youtube-dl Executable file

@@ -0,0 +1,6 @@
#!/usr/bin/env python
import youtube_dl
if __name__ == '__main__':
youtube_dl.main()

build_exe.py

@@ -1,48 +0,0 @@
from distutils.core import setup
import py2exe
import sys, os
"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
# If run without args, build executables
if len(sys.argv) == 1:
sys.argv.append("py2exe")
# os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # conflict with wine-py2exe.sh
sys.path.append('./youtube_dl')
options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
console = [{
"script":"./youtube_dl/__main__.py",
"dest_base": "youtube-dl",
}]
init_file = open('./youtube_dl/__init__.py')
for line in init_file.readlines():
if line.startswith('__version__'):
version = line[11:].strip(" ='\n")
break
else:
version = ''
setup(name='youtube-dl',
version=version,
description='Small command-line program to download videos from YouTube.com and other video sites',
url='https://github.com/rg3/youtube-dl',
packages=['youtube_dl'],
console = console,
options = {"py2exe": options},
zipfile = None,
)
import shutil
shutil.rmtree("build")

devscripts/bash-completion.in Normal file

@@ -0,0 +1,14 @@
__youtube-dl()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts="{{flags}}"
if [[ ${cur} == * ]] ; then
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
fi
}
complete -F __youtube-dl youtube-dl
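The `{{flags}}` placeholder above is filled in by `devscripts/bash-completion.py`, shown next. Once the generated `youtube-dl.bash-completion` has been sourced (or installed into `/etc/bash_completion.d` by `make install`), typing `youtube-dl --` and pressing Tab completes against every long option.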

devscripts/bash-completion.py Executable file

@@ -0,0 +1,26 @@
#!/usr/bin/env python
import os
from os.path import dirname as dirn
import sys
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
def build_completion(opt_parser):
opts_flag = []
for group in opt_parser.option_groups:
for option in group.option_list:
#for every long flag
opts_flag.append(option.get_opt_string())
with open(BASH_COMPLETION_TEMPLATE) as f:
template = f.read()
with open(BASH_COMPLETION_FILE, "w") as f:
#just using the special char
filled_template = template.replace("{{flags}}", " ".join(opts_flag))
f.write(filled_template)
parser = youtube_dl.parseOpts()[0]
build_completion(parser)

devscripts/gh-pages/add-version.py Normal file

@@ -0,0 +1,33 @@
#!/usr/bin/env python3
import json
import sys
import hashlib
import urllib.request
if len(sys.argv) <= 1:
print('Specify the version number as parameter')
sys.exit()
version = sys.argv[1]
with open('update/LATEST_VERSION', 'w') as f:
f.write(version)
versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
del versions_info['signature']
new_version = {}
filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version}
for key, filename in filenames.items():
print('Downloading and checksumming %s...' %filename)
url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename)
data = urllib.request.urlopen(url).read()
sha256sum = hashlib.sha256(data).hexdigest()
new_version[key] = (url, sha256sum)
versions_info['versions'][version] = new_version
versions_info['latest'] = version
json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
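For orientation, the `update/versions.json` this script maintains ends up shaped roughly as follows (a sketch inferred from the code above; hashes are replaced by placeholders):

    {
        "latest": "2013.01.02",
        "versions": {
            "2013.01.02": {
                "bin": ["http://youtube-dl.org/downloads/2013.01.02/youtube-dl", "<sha256>"],
                "exe": ["http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe", "<sha256>"],
                "tar": ["http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz", "<sha256>"]
            }
        },
        "signature": "<hex string added by sign-versions.py>"
    }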

devscripts/gh-pages/generate-download.py Normal file

@@ -0,0 +1,32 @@
#!/usr/bin/env python3
import hashlib
import shutil
import subprocess
import tempfile
import urllib.request
import json
versions_info = json.load(open('update/versions.json'))
version = versions_info['latest']
URL = versions_info['versions'][version]['bin'][0]
data = urllib.request.urlopen(URL).read()
# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
template = tmplf.read()
md5sum = hashlib.md5(data).hexdigest()
sha1sum = hashlib.sha1(data).hexdigest()
sha256sum = hashlib.sha256(data).hexdigest()
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', URL)
template = template.replace('@PROGRAM_MD5SUM@', md5sum)
template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
with open('download.html', 'w', encoding='utf-8') as dlf:
dlf.write(template)

devscripts/gh-pages/sign-versions.py Normal file

@@ -0,0 +1,28 @@
#!/usr/bin/env python3
import rsa
import json
from binascii import hexlify
versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
del versions_info['signature']
print('Enter the PKCS1 private key, followed by a blank line:')
privkey = ''
while True:
try:
line = input()
except EOFError:
break
if line == '':
break
privkey += line + '\n'
privkey = bytes(privkey, 'ascii')
privkey = rsa.PrivateKey.load_pkcs1(privkey)
signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)
versions_info['signature'] = signature
json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
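The consuming side of this signature could be verified with the same `rsa` module, along the lines of the sketch below (the `public.pem` path is hypothetical; the transition helper further down instead embeds the public key as a pair of integers and re-implements the check by hand):

    # Sketch: verify a versions.json signed by sign-versions.py above.
    import json
    from binascii import unhexlify

    import rsa

    versions_info = json.load(open('update/versions.json'))
    signature = unhexlify(versions_info.pop('signature'))
    # The signature covers the JSON serialization with sorted keys,
    # exactly as produced during signing.
    message = json.dumps(versions_info, sort_keys=True).encode('utf-8')

    with open('public.pem', 'rb') as f:
        pubkey = rsa.PublicKey.load_pkcs1(f.read())
    rsa.verify(message, signature, pubkey)  # raises rsa.VerificationError on mismatch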

devscripts/gh-pages/update-copyright.py Normal file

@@ -0,0 +1,21 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import with_statement
import datetime
import glob
import io # For Python 2 compatibility
import os
import re
year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
with io.open(fn, encoding='utf-8') as f:
content = f.read()
newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
if content != newc:
tmpFn = fn + '.part'
with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
outf.write(newc)
os.rename(tmpFn, fn)

devscripts/make_readme.py Executable file

@@ -0,0 +1,20 @@
import sys
import re
README_FILE = 'README.md'
helptext = sys.stdin.read()
with open(README_FILE) as f:
oldreadme = f.read()
header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
options = helptext[helptext.index(' General Options:')+19:]
options = re.sub(r'^ (\w.+)$', r'## \1', options, flags=re.M)
options = '# OPTIONS\n' + options + '\n'
with open(README_FILE, 'w') as f:
f.write(header)
f.write(options)
f.write(footer)
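As wired up in the Makefile above, this helper is fed the program's own `--help` output on stdin:

    COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py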

devscripts/release.sh

@@ -1,11 +1,85 @@
 #!/bin/sh

+# IMPORTANT: the following assumptions are made
+# * the GH repo is on the origin remote
+# * the gh-pages branch is named so locally
+# * the git config user.signingkey is properly set
+
+# You will need
+# pip install coverage nose rsa
+
+# TODO
+# release notes
+# make hash on local files
+
+set -e
+
 if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
-if [ ! -z "`git status --porcelain`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
-sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/__init__.py
-make all
-git add -A
+if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
+if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
+
+echo "\n### First of all, testing..."
+make clean
+nosetests --with-coverage --cover-package=youtube_dl --cover-html test || exit 1
+
+echo "\n### Changing version in version.py..."
+sed -i~ "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
+
+echo "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
+make README.md
+git add CHANGELOG README.md youtube_dl/version.py
 git commit -m "release $version"
-git tag -m "Release $version" "$version"
+
+echo "\n### Now tagging, signing and pushing..."
+git tag -s -m "Release $version" "$version"
+git show "$version"
+read -p "Is it good, can I push? (y/n) " -n 1
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
+echo
+MASTER=$(git rev-parse --abbrev-ref HEAD)
+git push origin $MASTER:master
+git push origin "$version"
+
+echo "\n### OK, now it is time to build the binaries..."
+REV=$(git rev-parse HEAD)
+make youtube-dl youtube-dl.tar.gz
+wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
+	wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
+mkdir -p "update_staging/$version"
+mv youtube-dl youtube-dl.exe "update_staging/$version"
+mv youtube-dl.tar.gz "update_staging/$version/youtube-dl-$version.tar.gz"
+RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
+(cd update_staging/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
+(cd update_staging/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
+(cd update_staging/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
+(cd update_staging/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
+git checkout HEAD -- youtube-dl youtube-dl.exe
+
+echo "\n### Signing and uploading the new binaries to youtube-dl.org..."
+for f in $RELEASE_FILES; do gpg --detach-sig "update_staging/$version/$f"; done
+scp -r "update_staging/$version" ytdl@youtube-dl.org:html/downloads/
+rm -r update_staging
+
+echo "\n### Now switching to gh-pages..."
+git checkout gh-pages
+git checkout "$MASTER" -- devscripts/gh-pages/
+git reset devscripts/gh-pages/
+devscripts/gh-pages/add-version.py $version
+devscripts/gh-pages/sign-versions.py < updates_key.pem
+devscripts/gh-pages/generate-download.py
+devscripts/gh-pages/update-copyright.py
+rm -r test_coverage
+mv cover test_coverage
+git add *.html *.html.in update test_coverage
+git commit -m "release $version"
+git show HEAD
+read -p "Is it good, can I push? (y/n) " -n 1
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
+echo
+git push origin gh-pages
+
+echo "\n### DONE!"
+rm -r devscripts
+git checkout $MASTER

devscripts/transition_helper.py Normal file

@@ -0,0 +1,40 @@
#!/usr/bin/env python
import sys, os
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
try:
raw_input()
except NameError: # Python 3
input()
filename = sys.argv[0]
API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
try:
urlh = compat_urllib_request.urlopen(BIN_URL)
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

devscripts/transition_helper_exe/setup.py Normal file

@@ -0,0 +1,12 @@
from distutils.core import setup
import py2exe
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)

devscripts/transition_helper_exe/youtube-dl.py Normal file

@@ -0,0 +1,102 @@
#!/usr/bin/env python
import sys, os
import urllib2
import json, hashlib
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
def b(x):
if version_info[0] == 2: return x
else: return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
while n:
block_size += 1
n >>= 8
signature = pow(int(signature, 16), key[1], key[0])
raw_bytes = []
while signature:
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
if signature[0:2] != b('\x00\x01'): return False
signature = signature[2:]
if not b('\x00') in signature: return False
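# the 19-byte magic checked next is the DER-encoded DigestInfo header for SHA-256 required by EMSA-PKCS1-v1_5 padding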
signature = signature[signature.index(b('\x00'))+1:]
if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
signature = signature[19:]
if signature != sha256(message).digest(): return False
return True
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
raw_input()
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % directory)
try:
versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
version = versions_info['versions'][versions_info['latest']]
try:
urlh = urllib2.urlopen(version['exe'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
try:
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit(u'ERROR: unable to write the new version')
try:
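# Windows will not let a running .exe overwrite itself, so the swap is delegated to a batch file: the ping acts as a sleep, then the .new binary replaces the old one and the script deletes itself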
bat = os.path.join(directory, 'youtube-dl-updater.bat')
b = open(bat, 'w')
b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" %(exe, exe, bat))
b.close()
os.startfile(bat)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

setup.py Normal file

@@ -0,0 +1,74 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.core import setup
import pkg_resources
import sys
try:
import py2exe
"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print("Cannot import py2exe", file=sys.stderr)
exit(1)
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
py2exe_console = [{
"script": "./youtube_dl/__main__.py",
"dest_base": "youtube-dl",
}]
py2exe_params = {
'console': py2exe_console,
'options': { "py2exe": py2exe_options },
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
params = {
'scripts': ['bin/youtube-dl'],
'data_files': [('etc/bash_completion.d', ['youtube-dl.bash-completion']), # Installing system-wide would require sudo...
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1/', ['youtube-dl.1'])]
}
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec'))
setup(
name = 'youtube_dl',
version = __version__,
description = 'YouTube video downloader',
long_description = 'Small command-line program to download videos from YouTube.com and other video sites.',
url = 'https://github.com/rg3/youtube-dl',
author = 'Ricardo Garcia',
maintainer = 'Philipp Hagemeister',
maintainer_email = 'phihag@phihag.de',
packages = ['youtube_dl'],
# Provokes warning on most systems (why?!)
#test_suite = 'nose.collector',
#test_requires = ['nosetest'],
classifiers = [
"Topic :: Multimedia :: Video",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: Public Domain",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3"
],
**params
)

test/parameters.json

@@ -1 +1,40 @@
-{"username": null, "listformats": null, "skip_download": false, "usenetrc": false, "max_downloads": null, "noprogress": false, "forcethumbnail": false, "forceformat": false, "format_limit": null, "ratelimit": null, "nooverwrites": false, "forceurl": false, "writeinfojson": false, "simulate": false, "playliststart": 1, "continuedl": true, "password": null, "prefer_free_formats": false, "nopart": false, "retries": 10, "updatetime": true, "consoletitle": false, "verbose": true, "forcefilename": false, "ignoreerrors": false, "logtostderr": false, "format": null, "subtitleslang": null, "quiet": false, "outtmpl": "%(id)s.%(ext)s", "rejecttitle": null, "playlistend": -1, "writedescription": false, "forcetitle": false, "forcedescription": false, "writesubtitles": false, "matchtitle": null}
+{
"consoletitle": false,
"continuedl": true,
"forcedescription": false,
"forcefilename": false,
"forceformat": false,
"forcethumbnail": false,
"forcetitle": false,
"forceurl": false,
"format": null,
"format_limit": null,
"ignoreerrors": false,
"listformats": null,
"logtostderr": false,
"matchtitle": null,
"max_downloads": null,
"nooverwrites": false,
"nopart": false,
"noprogress": false,
"outtmpl": "%(id)s.%(ext)s",
"password": null,
"playlistend": -1,
"playliststart": 1,
"prefer_free_formats": false,
"quiet": false,
"ratelimit": null,
"rejecttitle": null,
"retries": 10,
"simulate": false,
"skip_download": false,
"subtitleslang": null,
"test": true,
"updatetime": true,
"usenetrc": false,
"username": null,
"verbose": true,
"writedescription": false,
"writeinfojson": true,
"writesubtitles": false
}

test/test_all_urls.py Normal file

@@ -0,0 +1,27 @@
#!/usr/bin/env python
import sys
import unittest
# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE
class TestAllURLsMatching(unittest.TestCase):
def test_youtube_playlist_matching(self):
self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958'))
self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M'))
def test_youtube_matching(self):
self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M'))
def test_youtube_extract(self):
self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc')
if __name__ == '__main__':
unittest.main()

test/test_download.py

@@ -1,93 +1,125 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python

-import unittest
+import errno
 import hashlib
+import io
 import os
 import json
+import unittest
+import sys
+import hashlib
+import socket

-from youtube_dl.FileDownloader import FileDownloader
-from youtube_dl.InfoExtractors import YoutubeIE, DailymotionIE
-from youtube_dl.InfoExtractors import MetacafeIE, BlipTVIE
+# Allow direct execution
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import youtube_dl.FileDownloader
+import youtube_dl.InfoExtractors
+from youtube_dl.utils import *
+
+DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
+PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+
+def _try_rm(filename):
+    """ Remove a file if it exists """
+    try:
+        os.remove(filename)
+    except OSError as ose:
+        if ose.errno != errno.ENOENT:
+            raise
+
+class FileDownloader(youtube_dl.FileDownloader):
+    def __init__(self, *args, **kwargs):
+        self.to_stderr = self.to_screen
+        self.processed_info_dicts = []
+        return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
+    def process_info(self, info_dict):
+        self.processed_info_dicts.append(info_dict)
+        return youtube_dl.FileDownloader.process_info(self, info_dict)
+
+def _file_md5(fn):
+    with open(fn, 'rb') as f:
+        return hashlib.md5(f.read()).hexdigest()
+
+with io.open(DEF_FILE, encoding='utf-8') as deff:
+    defs = json.load(deff)
+with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    parameters = json.load(pf)

-class DownloadTest(unittest.TestCase):
-    PARAMETERS_FILE = "test/parameters.json"
-    #calculated with md5sum:
-    #md5sum (GNU coreutils) 8.19
-
-    YOUTUBE_SIZE = 1993883
-    YOUTUBE_URL = "http://www.youtube.com/watch?v=BaW_jenozKc"
-    YOUTUBE_FILE = "BaW_jenozKc.mp4"
-
-    DAILYMOTION_MD5 = "d363a50e9eb4f22ce90d08d15695bb47"
-    DAILYMOTION_URL = "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech"
-    DAILYMOTION_FILE = "x33vw9.mp4"
-
-    METACAFE_SIZE = 5754305
-    METACAFE_URL = "http://www.metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/"
-    METACAFE_FILE = "_aUehQsCQtM.flv"
-
-    BLIP_MD5 = "93c24d2f4e0782af13b8a7606ea97ba7"
-    BLIP_URL = "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352"
-    BLIP_FILE = "5779306.m4v"
-
-    XVIDEO_MD5 = ""
-    XVIDEO_URL = ""
-    XVIDEO_FILE = ""
-
-    def test_youtube(self):
-        #let's download a file from youtube
-        with open(DownloadTest.PARAMETERS_FILE) as f:
-            fd = FileDownloader(json.load(f))
-        fd.add_info_extractor(YoutubeIE())
-        fd.download([DownloadTest.YOUTUBE_URL])
-        self.assertTrue(os.path.exists(DownloadTest.YOUTUBE_FILE))
-        self.assertEqual(os.path.getsize(DownloadTest.YOUTUBE_FILE), DownloadTest.YOUTUBE_SIZE)
-
-    def test_dailymotion(self):
-        with open(DownloadTest.PARAMETERS_FILE) as f:
-            fd = FileDownloader(json.load(f))
-        fd.add_info_extractor(DailymotionIE())
-        fd.download([DownloadTest.DAILYMOTION_URL])
-        self.assertTrue(os.path.exists(DownloadTest.DAILYMOTION_FILE))
-        md5_down_file = md5_for_file(DownloadTest.DAILYMOTION_FILE)
-        self.assertEqual(md5_down_file, DownloadTest.DAILYMOTION_MD5)
-
-    def test_metacafe(self):
-        #this emulate a skip,to be 2.6 compatible
-        with open(DownloadTest.PARAMETERS_FILE) as f:
-            fd = FileDownloader(json.load(f))
-        fd.add_info_extractor(MetacafeIE())
-        fd.add_info_extractor(YoutubeIE())
-        fd.download([DownloadTest.METACAFE_URL])
-        self.assertTrue(os.path.exists(DownloadTest.METACAFE_FILE))
-        self.assertEqual(os.path.getsize(DownloadTest.METACAFE_FILE), DownloadTest.METACAFE_SIZE)
-
-    def test_blip(self):
-        with open(DownloadTest.PARAMETERS_FILE) as f:
-            fd = FileDownloader(json.load(f))
-        fd.add_info_extractor(BlipTVIE())
-        fd.download([DownloadTest.BLIP_URL])
-        self.assertTrue(os.path.exists(DownloadTest.BLIP_FILE))
-        md5_down_file = md5_for_file(DownloadTest.BLIP_FILE)
-        self.assertEqual(md5_down_file, DownloadTest.BLIP_MD5)
-
-    def tearDown(self):
-        if os.path.exists(DownloadTest.YOUTUBE_FILE):
-            os.remove(DownloadTest.YOUTUBE_FILE)
-        if os.path.exists(DownloadTest.DAILYMOTION_FILE):
-            os.remove(DownloadTest.DAILYMOTION_FILE)
-        if os.path.exists(DownloadTest.METACAFE_FILE):
-            os.remove(DownloadTest.METACAFE_FILE)
-        if os.path.exists(DownloadTest.BLIP_FILE):
-            os.remove(DownloadTest.BLIP_FILE)
-
-def md5_for_file(filename, block_size=2**20):
-    with open(filename) as f:
-        md5 = hashlib.md5()
-        while True:
-            data = f.read(block_size)
-            if not data:
-                break
-            md5.update(data)
-    return md5.hexdigest()
+class TestDownload(unittest.TestCase):
+    def setUp(self):
+        self.parameters = parameters
+        self.defs = defs
+
+### Dynamically generate tests
+def generator(test_case):
+
+    def test_template(self):
+        ie = getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
+        if not ie._WORKING:
+            print('Skipping: IE marked as not _WORKING')
+            return
+        if 'playlist' not in test_case and not test_case['file']:
+            print('Skipping: No output file specified')
+            return
+        if 'skip' in test_case:
+            print('Skipping: {0}'.format(test_case['skip']))
+            return
+
+        params = self.parameters.copy()
+        params.update(test_case.get('params', {}))
+
+        fd = FileDownloader(params)
+        fd.add_info_extractor(ie())
+        for ien in test_case.get('add_ie', []):
+            fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
+
+        test_cases = test_case.get('playlist', [test_case])
+        for tc in test_cases:
+            _try_rm(tc['file'])
+            _try_rm(tc['file'] + '.part')
+            _try_rm(tc['file'] + '.info.json')
+        try:
+            fd.download([test_case['url']])
+
+            for tc in test_cases:
+                if not test_case.get('params', {}).get('skip_download', False):
+                    self.assertTrue(os.path.exists(tc['file']))
+                self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
+                if 'md5' in tc:
+                    md5_for_file = _file_md5(tc['file'])
+                    self.assertEqual(md5_for_file, tc['md5'])
+                with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
+                    info_dict = json.load(infof)
+                for (info_field, value) in tc.get('info_dict', {}).items():
+                    if value.startswith('md5:'):
+                        md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest()
+                        self.assertEqual(value[3:], md5_info_value)
+                    else:
+                        self.assertEqual(value, info_dict.get(info_field))
+        finally:
+            for tc in test_cases:
+                _try_rm(tc['file'])
+                _try_rm(tc['file'] + '.part')
+                _try_rm(tc['file'] + '.info.json')
+
+    return test_template
+
+### And add them to TestDownload
+for test_case in defs:
+    test_method = generator(test_case)
+    test_method.__name__ = "test_{0}".format(test_case["name"])
+    setattr(TestDownload, test_method.__name__, test_method)
+    del test_method
+
+if __name__ == '__main__':
+    unittest.main()
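The pattern on the new side of this diff (build one closure per JSON test case, give it a name, and attach it to the TestCase class) is the heart of the rewrite. A minimal self-contained sketch of just that mechanism, with all names invented for illustration:

    import unittest

    class TestDynamic(unittest.TestCase):
        pass

    def make_test(case):
        # the closure captures one JSON case, like generator() above
        def test(self):
            self.assertIn('file', case)  # stand-in for the real download assertions
        return test

    for case in [{'name': 'Youtube', 'file': 'a.mp4'}, {'name': 'Vimeo', 'file': 'b.mp4'}]:
        method = make_test(case)
        method.__name__ = 'test_' + case['name']
        setattr(TestDynamic, method.__name__, method)

    if __name__ == '__main__':
        unittest.main()  # discovers and runs test_Youtube and test_Vimeo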

test/test_execution.py Normal file

@@ -0,0 +1,26 @@
import unittest
import sys
import os
import subprocess
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
_DEV_NULL = subprocess.DEVNULL
except AttributeError:
_DEV_NULL = open(os.devnull, 'wb')
class TestExecution(unittest.TestCase):
def test_import(self):
subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
def test_module_exec(self):
if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
def test_main_exec(self):
subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
if __name__ == '__main__':
unittest.main()

test/test_utils.py

@@ -1,47 +1,100 @@
-# -*- coding: utf-8 -*-
+#!/usr/bin/env python

 # Various small unit tests

+import sys
 import unittest

+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
 #from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import timeconvert
 from youtube_dl.utils import sanitize_filename
 from youtube_dl.utils import unescapeHTML
 from youtube_dl.utils import orderedSet

+if sys.version_info < (3, 0):
+    _compat_str = lambda b: b.decode('unicode-escape')
+else:
+    _compat_str = lambda s: s
+
 class TestUtil(unittest.TestCase):
     def test_timeconvert(self):
         self.assertTrue(timeconvert('') is None)
         self.assertTrue(timeconvert('bougrg') is None)

     def test_sanitize_filename(self):
-        self.assertEqual(sanitize_filename(u'abc'), u'abc')
-        self.assertEqual(sanitize_filename(u'abc_d-e'), u'abc_d-e')
+        self.assertEqual(sanitize_filename('abc'), 'abc')
+        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')

-        self.assertEqual(sanitize_filename(u'123'), u'123')
+        self.assertEqual(sanitize_filename('123'), '123')

-        self.assertEqual(u'abc-de', sanitize_filename(u'abc/de'))
-        self.assertFalse(u'/' in sanitize_filename(u'abc/de///'))
+        self.assertEqual('abc_de', sanitize_filename('abc/de'))
+        self.assertFalse('/' in sanitize_filename('abc/de///'))

-        self.assertEqual(u'abc-de', sanitize_filename(u'abc/<>\\*|de'))
-        self.assertEqual(u'xxx', sanitize_filename(u'xxx/<>\\*|'))
-        self.assertEqual(u'yes no', sanitize_filename(u'yes? no'))
-        self.assertEqual(u'this - that', sanitize_filename(u'this: that'))
+        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
+        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
+        self.assertEqual('yes no', sanitize_filename('yes? no'))
+        self.assertEqual('this - that', sanitize_filename('this: that'))

-        self.assertEqual(sanitize_filename(u'ä'), u'ä')
-        self.assertEqual(sanitize_filename(u'кириллица'), u'кириллица')
+        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
+        aumlaut = _compat_str('\xe4')
+        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
+        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
+        self.assertEqual(sanitize_filename(tests), tests)

-        for forbidden in u'"\0\\/':
-            self.assertTrue(forbidden not in sanitize_filename(forbidden))
+        forbidden = '"\0\\/'
+        for fc in forbidden:
+            for fbc in forbidden:
+                self.assertTrue(fbc not in sanitize_filename(fc))

-    def test_ordered_set(self):
-        self.assertEqual(orderedSet([1,1,2,3,4,4,5,6,7,3,5]), [1,2,3,4,5,6,7])
-        self.assertEqual(orderedSet([]), [])
-        self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
-        self.assertEqual(orderedSet([135,1,1,1]), [135,1])
-
-    def test_unescape_html(self):
-        self.assertEqual(unescapeHTML(u"%20;"), u"%20;")
+    def test_sanitize_filename_restricted(self):
+        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
+        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
+
+        self.assertEqual(sanitize_filename('123', restricted=True), '123')
+
+        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
+        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
+
+        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
+        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
+        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
+        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
+
+        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
+        self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
+        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '')  # No empty filename
+
+        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
+        for fc in forbidden:
+            for fbc in forbidden:
+                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
+
+        # Handle a common case more neatly
+        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
+        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
+        # .. but make sure the file name is never empty
+        self.assertTrue(sanitize_filename('-', restricted=True) != '')
+        self.assertTrue(sanitize_filename(':', restricted=True) != '')
+
+    def test_sanitize_ids(self):
+        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
+        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
+        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
+
+    def test_ordered_set(self):
+        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
+        self.assertEqual(orderedSet([]), [])
+        self.assertEqual(orderedSet([1]), [1])
+        #keep the list ordered
+        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
+
+    def test_unescape_html(self):
+        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
+
+if __name__ == '__main__':
+    unittest.main()

test/test_write_info_json.py Normal file

@@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8
import json
import os
import sys
import unittest
# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import youtube_dl.FileDownloader
import youtube_dl.InfoExtractors
from youtube_dl.utils import *
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FileDownloader(youtube_dl.FileDownloader):
def __init__(self, *args, **kwargs):
youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
self.to_stderr = self.to_screen
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
params = json.load(pf)
params['writeinfojson'] = True
params['skip_download'] = True
params['writedescription'] = True
TEST_ID = 'BaW_jenozKc'
INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐
This is a test video for youtube-dl.
For more information, contact phihag@phihag.de .'''
class TestInfoJSON(unittest.TestCase):
def setUp(self):
# Clear old files
self.tearDown()
def test_info_json(self):
ie = youtube_dl.InfoExtractors.YoutubeIE()
fd = FileDownloader(params)
fd.add_info_extractor(ie)
fd.download([TEST_ID])
self.assertTrue(os.path.exists(INFO_JSON_FILE))
with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
jd = json.load(jsonf)
self.assertEqual(jd['upload_date'], u'20121002')
self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
self.assertEqual(jd['id'], TEST_ID)
self.assertEqual(jd['extractor'], 'youtube')
self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
self.assertTrue(os.path.exists(DESCRIPTION_FILE))
with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
descr = descf.read()
self.assertEqual(descr, EXPECTED_DESCRIPTION)
def tearDown(self):
if os.path.exists(INFO_JSON_FILE):
os.remove(INFO_JSON_FILE)
if os.path.exists(DESCRIPTION_FILE):
os.remove(DESCRIPTION_FILE)
if __name__ == '__main__':
unittest.main()

test/test_youtube_lists.py Normal file

@@ -0,0 +1,73 @@
#!/usr/bin/env python
import sys
import unittest
import json
# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
from youtube_dl.utils import *
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FakeDownloader(object):
def __init__(self):
self.result = []
self.params = parameters
def to_screen(self, s):
print(s)
def trouble(self, s):
raise Exception(s)
def download(self, x):
self.result.append(x)
class TestYoutubeLists(unittest.TestCase):
def test_youtube_playlist(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(DL.result, [
['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
])
def test_youtube_playlist_long(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
self.assertTrue(len(DL.result) >= 799)
def test_youtube_course(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
# TODO find a > 100 (paginating?) videos course
IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
self.assertEqual(len(DL.result), 25)
self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
def test_youtube_channel(self):
# I give up, please find a channel that does paginate and test this like test_youtube_playlist_long
pass # TODO
def test_youtube_user(self):
DL = FakeDownloader()
IE = YoutubeUserIE(DL)
IE.extract('https://www.youtube.com/user/TheLinuxFoundation')
self.assertTrue(len(DL.result) >= 320)
if __name__ == '__main__':
unittest.main()

test/test_youtube_subtitles.py Normal file

@@ -0,0 +1,57 @@
#!/usr/bin/env python
import sys
import unittest
import json
import io
import hashlib
# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeIE
from youtube_dl.utils import *
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FakeDownloader(object):
def __init__(self):
self.result = []
self.params = parameters
def to_screen(self, s):
print(s)
def trouble(self, s):
raise Exception(s)
def download(self, x):
self.result.append(x)
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
class TestYoutubeSubtitles(unittest.TestCase):
def test_youtube_subtitles(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(md5(info_dict[0]['subtitles']), 'c3228550d59116f3c29fba370b55d033')
def test_youtube_subtitles_it(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
DL.params['subtitleslang'] = 'it'
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(md5(info_dict[0]['subtitles']), '132a88a0daf8e1520f393eb58f1f646a')
if __name__ == '__main__':
unittest.main()

test/tests.json Normal file

@@ -0,0 +1,164 @@
[
{
"name": "Youtube",
"url": "http://www.youtube.com/watch?v=BaW_jenozKc",
"file": "BaW_jenozKc.mp4",
"info_dict": {
"title": "youtube-dl test video \"'/\\ä↭𝕐",
"uploader": "Philipp Hagemeister",
"uploader_id": "phihag",
"upload_date": "20121002",
"description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
}
},
{
"name": "Dailymotion",
"md5": "392c4b85a60a90dc4792da41ce3144eb",
"url": "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech",
"file": "x33vw9.mp4"
},
{
"name": "Metacafe",
"add_ie": ["Youtube"],
"url": "http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
"file": "_aUehQsCQtM.flv"
},
{
"name": "BlipTV",
"md5": "b2d849efcf7ee18917e4b4d9ff37cafe",
"url": "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352",
"file": "5779306.m4v"
},
{
"name": "XVideos",
"md5": "1d0c835822f0a71a7bf011855db929d0",
"url": "http://www.xvideos.com/video939581/funny_porns_by_s_-1",
"file": "939581.flv"
},
{
"name": "Vimeo",
"md5": "8879b6cc097e987f02484baf890129e5",
"url": "http://vimeo.com/56015672",
"file": "56015672.mp4",
"info_dict": {
"title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
"uploader": "Filippo Valsorda",
"uploader_id": "user7108434",
"upload_date": "20121220",
"description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
}
},
{
"name": "Soundcloud",
"md5": "ebef0a451b909710ed1d7787dddbf0d7",
"url": "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy",
"file": "62986583.mp3"
},
{
"name": "StanfordOpenClassroom",
"md5": "544a9468546059d4e80d76265b0443b8",
"url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
"file": "PracticalUnix_intro-environment.mp4"
},
{
"name": "XNXX",
"md5": "0831677e2b4761795f68d417e0b7b445",
"url": "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_",
"file": "1135332.flv"
},
{
"name": "Youku",
"url": "http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
"file": "XNDgyMDQ2NTQw_part00.flv",
"md5": "ffe3f2e435663dc2d1eea34faeff5b5b",
"params": { "test": false }
},
{
"name": "NBA",
"url": "http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html",
"file": "0021200253-okc-bkn-recap.nba.mp4",
"md5": "c0edcfc37607344e2ff8f13c378c88a4"
},
{
"name": "JustinTV",
"url": "http://www.twitch.tv/thegamedevhub/b/296128360",
"file": "296128360.flv",
"md5": "ecaa8a790c22a40770901460af191c9a"
},
{
"name": "MyVideo",
"url": "http://www.myvideo.de/watch/8229274/bowling_fail_or_win",
"file": "8229274.flv",
"md5": "2d2753e8130479ba2cb7e0a37002053e"
},
{
"name": "Escapist",
"url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
"file": "6618-Breaking-Down-Baldurs-Gate.flv",
"md5": "c6793dbda81388f4264c1ba18684a74d",
"skip": "Fails with timeout on Travis"
},
{
"name": "GooglePlus",
"url": "https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
"file": "ZButuJc6CtH.flv"
},
{
"name": "FunnyOrDie",
"url": "http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version",
"file": "0732f586d7.mp4",
"md5": "f647e9e90064b53b6e046e75d0241fbd"
},
{
"name": "TweetReel",
"url": "http://tweetreel.com/?77smq",
"file": "77smq.mov",
"md5": "56b4d9ca9de467920f3f99a6d91255d6",
"info_dict": {
"uploader": "itszero",
"uploader_id": "itszero",
"upload_date": "20091225",
"description": "Installing Gentoo Linux on Powerbook G4, it turns out the sleep indicator becomes HDD activity indicator :D"
}
},
{
"name": "Steam",
"url": "http://store.steampowered.com/video/105600/",
"playlist": [
{
"file": "81300.flv",
"md5": "f870007cee7065d7c76b88f0a45ecc07",
"info_dict": {
"title": "Terraria 1.1 Trailer"
}
},
{
"file": "80859.flv",
"md5": "61aaf31a5c5c3041afb58fb83cbb5751",
"info_dict": {
"title": "Terraria Trailer"
}
}
]
},
{
"name": "Ustream",
"url": "http://www.ustream.tv/recorded/20274954",
"file": "20274954.flv",
"md5": "088f151799e8f572f84eb62f17d73e5c",
"info_dict": {
"title": "Young Americans for Liberty February 7, 2012 2:28 AM"
}
},
{
"name": "InfoQ",
"url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
"file": "12-jan-pythonthings.mp4",
"info_dict": {
"title": "A Few of My Favorite [Python] Things"
},
"params": {
"skip_download": true
}
}
]
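
(Each entry above is declarative, so a harness can generate one test per definition. A minimal consumer sketch — hypothetical helper names; the actual runner that reads this file lives elsewhere in this commit:

import hashlib
import json

def file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

with open('test/tests.json') as f:
    definitions = json.load(f)

for t in definitions:
    if t.get('skip'):
        continue  # e.g. "Fails with timeout on Travis"
    # download step omitted: the matching IE fetches t['url'] into t['file'],
    # honouring any per-test overrides in t.get('params', {})
    if 'md5' in t:
        assert file_md5(t['file']) == t['md5'], t['name']
    # entries with an 'info_dict' additionally pin metadata such as title,
    # uploader and upload_date; 'playlist' entries check each item in turn
)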

BIN
youtube-dl Executable file

Binary file not shown.


@ -1,14 +0,0 @@
__youtube-dl()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts="--all-formats --audio-format --audio-quality --auto-number --batch-file --console-title --continue --cookies --dump-user-agent --extract-audio --format --get-description --get-filename --get-format --get-thumbnail --get-title --get-url --help --id --ignore-errors --keep-video --list-extractors --list-formats --literal --match-title --max-downloads --max-quality --netrc --no-continue --no-mtime --no-overwrites --no-part --no-progress --output --password --playlist-end --playlist-start --prefer-free-formats --quiet --rate-limit --reject-title --retries --simulate --skip-download --srt-lang --title --update --user-agent --username --verbose --version --write-description --write-info-json --write-srt"
if [[ ${cur} == * ]] ; then
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
fi
}
complete -F __youtube-dl youtube-dl

BIN
youtube-dl.exe Normal file

Binary file not shown.


@ -1,693 +1,738 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import httplib from __future__ import absolute_import
import math import math
import io
import os import os
import re import re
import socket import socket
import subprocess import subprocess
import sys import sys
import time import time
import urllib2 import traceback
if os.name == 'nt': if os.name == 'nt':
import ctypes import ctypes
from utils import * from .utils import *
class FileDownloader(object): class FileDownloader(object):
"""File Downloader class. """File Downloader class.
File downloader objects are the ones responsible for downloading the File downloader objects are the ones responsible for downloading the
actual video file and writing it to disk if the user has requested actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per it, among some other tasks. In most cases there should be one per
program. As, given a video URL, the downloader doesn't know how to program. As, given a video URL, the downloader doesn't know how to
extract all the needed information (a task that InfoExtractors do), it extract all the needed information (a task that InfoExtractors do), it
has to pass the URL to one of them. has to pass the URL to one of them.
For this, file downloader objects have a method that allows For this, file downloader objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed InfoExtractors to be registered in a given order. When it is passed
a URL, the file downloader handles it to the first InfoExtractor it a URL, the file downloader handles it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and all the information about the video or videos the URL refers to, and
asks the FileDownloader to process the video information, possibly asks the FileDownloader to process the video information, possibly
downloading the video. downloading the video.
File downloaders accept a lot of parameters. In order not to saturate File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params options instead. These options are available through the params
attribute for the InfoExtractors to use. The FileDownloader also attribute for the InfoExtractors to use. The FileDownloader also
registers itself as the downloader in charge for the InfoExtractors registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration". that are added to it, so this is a "mutual registration".
Available options: Available options:
username: Username for authentication purposes. username: Username for authentication purposes.
password: Password for authentication purposes. password: Password for authentication purposes.
usenetrc: Use netrc for authentication instead. usenetrc: Use netrc for authentication instead.
quiet: Do not print messages to stdout. quiet: Do not print messages to stdout.
forceurl: Force printing final URL. forceurl: Force printing final URL.
forcetitle: Force printing title. forcetitle: Force printing title.
forcethumbnail: Force printing thumbnail URL. forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description. forcedescription: Force printing description.
forcefilename: Force printing final filename. forcefilename: Force printing final filename.
simulate: Do not download the video files. simulate: Do not download the video files.
format: Video format code. format: Video format code.
format_limit: Highest quality format to try. format_limit: Highest quality format to try.
outtmpl: Template for output names. outtmpl: Template for output names.
ignoreerrors: Do not stop on download errors. restrictfilenames: Do not allow "&" and spaces in file names
ratelimit: Download speed limit, in bytes/sec. ignoreerrors: Do not stop on download errors.
nooverwrites: Prevent overwriting files. ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx nooverwrites: Prevent overwriting files.
continuedl: Try to continue downloads if possible. retries: Number of times to retry for HTTP error 5xx
noprogress: Do not print the progress bar. buffersize: Size of download buffer in bytes.
playliststart: Playlist item to start at. noresizebuffer: Do not automatically resize the download buffer.
playlistend: Playlist item to end at. continuedl: Try to continue downloads if possible.
matchtitle: Download only matching titles. noprogress: Do not print the progress bar.
rejecttitle: Reject downloads for matching titles. playliststart: Playlist item to start at.
logtostderr: Log messages to stderr instead of stdout. playlistend: Playlist item to end at.
consoletitle: Display progress in console window's titlebar. matchtitle: Download only matching titles.
nopart: Do not use temporary .part files. rejecttitle: Reject downloads for matching titles.
updatetime: Use the Last-modified header to set output file timestamps. logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file consoletitle: Display progress in console window's titlebar.
writeinfojson: Write the video description to a .info.json file nopart: Do not use temporary .part files.
writesubtitles: Write the video subtitles to a .srt file updatetime: Use the Last-modified header to set output file timestamps.
subtitleslang: Language of the subtitles to download writedescription: Write the video description to a .description file
""" writeinfojson: Write the video description to a .info.json file
writesubtitles: Write the video subtitles to a .srt file
params = None subtitleslang: Language of the subtitles to download
_ies = [] test: Download only first bytes to test the downloader.
_pps = [] """
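# Usage sketch (illustrative, not part of this diff): the "mutual registration"
# described in the docstring pairs a downloader with its extractors like so:
#   fd = FileDownloader({'outtmpl': u'%(id)s.%(ext)s'})
#   fd.add_info_extractor(YoutubeIE())  # appends the IE and calls ie.set_downloader(fd)
#   fd.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])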
_download_retcode = None
_num_downloads = None params = None
_screen_file = None _ies = []
_pps = []
def __init__(self, params): _download_retcode = None
"""Create a FileDownloader object with the given options.""" _num_downloads = None
self._ies = [] _screen_file = None
self._pps = []
self._download_retcode = 0 def __init__(self, params):
self._num_downloads = 0 """Create a FileDownloader object with the given options."""
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._ies = []
self.params = params self._pps = []
self._download_retcode = 0
@staticmethod self._num_downloads = 0
def format_bytes(bytes): self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
if bytes is None: self.params = params
return 'N/A'
if type(bytes) is str: if '%(stitle)s' in self.params['outtmpl']:
bytes = float(bytes) self.to_stderr(u'WARNING: %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag (which also secures %(uploader)s et al.) instead.')
if bytes == 0.0:
exponent = 0 @staticmethod
else: def format_bytes(bytes):
exponent = long(math.log(bytes, 1024.0)) if bytes is None:
suffix = 'bkMGTPEZY'[exponent] return 'N/A'
converted = float(bytes) / float(1024 ** exponent) if type(bytes) is str:
return '%.2f%s' % (converted, suffix) bytes = float(bytes)
if bytes == 0.0:
@staticmethod exponent = 0
def calc_percent(byte_counter, data_len): else:
if data_len is None: exponent = int(math.log(bytes, 1024.0))
return '---.-%' suffix = 'bkMGTPEZY'[exponent]
return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0)) converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
@staticmethod
def calc_eta(start, now, total, current): @staticmethod
if total is None: def calc_percent(byte_counter, data_len):
return '--:--' if data_len is None:
dif = now - start return '---.-%'
if current == 0 or dif < 0.001: # One millisecond return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
return '--:--'
rate = float(current) / dif @staticmethod
eta = long((float(total) - float(current)) / rate) def calc_eta(start, now, total, current):
(eta_mins, eta_secs) = divmod(eta, 60) if total is None:
if eta_mins > 99: return '--:--'
return '--:--' dif = now - start
return '%02d:%02d' % (eta_mins, eta_secs) if current == 0 or dif < 0.001: # One millisecond
return '--:--'
@staticmethod rate = float(current) / dif
def calc_speed(start, now, bytes): eta = int((float(total) - float(current)) / rate)
dif = now - start (eta_mins, eta_secs) = divmod(eta, 60)
if bytes == 0 or dif < 0.001: # One millisecond if eta_mins > 99:
return '%10s' % '---b/s' return '--:--'
return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif)) return '%02d:%02d' % (eta_mins, eta_secs)
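# Worked example (illustrative, not part of this diff): with start=0.0, now=10.0,
# total=1000 and current=250, rate = 250/10 = 25.0 B/s and
# eta = int((1000 - 250) / 25.0) = 30, so calc_eta returns '00:30'.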
@staticmethod @staticmethod
def best_block_size(elapsed_time, bytes): def calc_speed(start, now, bytes):
new_min = max(bytes / 2.0, 1.0) dif = now - start
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB if bytes == 0 or dif < 0.001: # One millisecond
if elapsed_time < 0.001: return '%10s' % '---b/s'
return long(new_max) return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
rate = bytes / elapsed_time
if rate > new_max: @staticmethod
return long(new_max) def best_block_size(elapsed_time, bytes):
if rate < new_min: new_min = max(bytes / 2.0, 1.0)
return long(new_min) new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
return long(rate) if elapsed_time < 0.001:
return int(new_max)
@staticmethod rate = bytes / elapsed_time
def parse_bytes(bytestr): if rate > new_max:
"""Parse a string indicating a byte quantity into a long integer.""" return int(new_max)
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) if rate < new_min:
if matchobj is None: return int(new_min)
return None return int(rate)
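# Worked example (illustrative): 1024 bytes read in 0.5s gives rate = 2048.0 B/s;
# new_min = 512.0 and new_max = 2048.0, so the next block size becomes int(2048).
# The bounds keep the buffer between half and double the last read, capped at 4 MB.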
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) @staticmethod
return long(round(number * multiplier)) def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
def add_info_extractor(self, ie): matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
"""Add an InfoExtractor object to the end of the list.""" if matchobj is None:
self._ies.append(ie) return None
ie.set_downloader(self) number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
def add_post_processor(self, pp): return int(round(number * multiplier))
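# Worked example (illustrative): parse_bytes('10.5M') matches number=10.5 and
# multiplier=1024.0**2, returning int(round(10.5 * 1048576)) = 11010048;
# format_bytes(11010048) inverts this back to '10.50M'.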
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp) def add_info_extractor(self, ie):
pp.set_downloader(self) """Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
def to_screen(self, message, skip_eol=False): ie.set_downloader(self)
"""Print message to stdout if not in quiet mode."""
assert type(message) == type(u'') def add_post_processor(self, pp):
if not self.params.get('quiet', False): """Add a PostProcessor object to the end of the chain."""
terminator = [u'\n', u''][skip_eol] self._pps.append(pp)
output = message + terminator pp.set_downloader(self)
if 'b' not in self._screen_file.mode or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
output = output.encode(preferredencoding(), 'ignore') def to_screen(self, message, skip_eol=False):
self._screen_file.write(output) """Print message to stdout if not in quiet mode."""
self._screen_file.flush() assert type(message) == type(u'')
if not self.params.get('quiet', False):
def to_stderr(self, message): terminator = [u'\n', u''][skip_eol]
"""Print message to stderr.""" output = message + terminator
print >>sys.stderr, message.encode(preferredencoding()) if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
output = output.encode(preferredencoding(), 'ignore')
def to_cons_title(self, message): self._screen_file.write(output)
"""Set console/terminal window title to message.""" self._screen_file.flush()
if not self.params.get('consoletitle', False):
return def to_stderr(self, message):
if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): """Print message to stderr."""
# c_wchar_p() might not be necessary if `message` is assert type(message) == type(u'')
# already of type unicode() output = message + u'\n'
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
elif 'TERM' in os.environ: output = output.encode(preferredencoding())
sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding())) sys.stderr.write(output)
def fixed_template(self): def to_cons_title(self, message):
"""Checks if the output template is fixed.""" """Set console/terminal window title to message."""
return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None) if not self.params.get('consoletitle', False):
return
def trouble(self, message=None): if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
"""Determine action to take when a download problem appears. # c_wchar_p() might not be necessary if `message` is
# already of type unicode()
Depending on if the downloader has been configured to ignore ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
download errors or not, this method may throw an exception or elif 'TERM' in os.environ:
not when errors are found, after printing the message. sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
"""
if message is not None: def fixed_template(self):
self.to_stderr(message) """Checks if the output template is fixed."""
if not self.params.get('ignoreerrors', False): return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None)
raise DownloadError(message)
self._download_retcode = 1 def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit.""" Depending on if the downloader has been configured to ignore
rate_limit = self.params.get('ratelimit', None) download errors or not, this method may throw an exception or
if rate_limit is None or byte_counter == 0: not when errors are found, after printing the message.
return
now = time.time() tb, if given, is additional traceback information.
elapsed = now - start_time """
if elapsed <= 0.0: if message is not None:
return self.to_stderr(message)
speed = float(byte_counter) / elapsed if self.params.get('verbose'):
if speed > rate_limit: if tb is None:
time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit) tb_data = traceback.format_list(traceback.extract_stack())
tb = u''.join(tb_data)
def temp_name(self, filename): self.to_stderr(tb)
"""Returns a temporary filename for the given filename.""" if not self.params.get('ignoreerrors', False):
if self.params.get('nopart', False) or filename == u'-' or \ raise DownloadError(message)
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): self._download_retcode = 1
return filename
return filename + u'.part' def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit."""
def undo_temp_name(self, filename): rate_limit = self.params.get('ratelimit', None)
if filename.endswith(u'.part'): if rate_limit is None or byte_counter == 0:
return filename[:-len(u'.part')] return
return filename now = time.time()
elapsed = now - start_time
def try_rename(self, old_filename, new_filename): if elapsed <= 0.0:
try: return
if old_filename == new_filename: speed = float(byte_counter) / elapsed
return if speed > rate_limit:
os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
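# Worked example (illustrative): with ratelimit=51200 (parse_bytes('50k')), if
# 102400 bytes arrived in the first second, the sleep is
# (102400 - 51200 * 1.0) / 51200 = 1.0s, pulling the average back to ~50 kB/s.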
except (IOError, OSError), err:
self.trouble(u'ERROR: unable to rename file') def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
def try_utime(self, filename, last_modified_hdr): if self.params.get('nopart', False) or filename == u'-' or \
"""Try to set the last-modified time of the given file.""" (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
if last_modified_hdr is None: return filename
return return filename + u'.part'
if not os.path.isfile(encodeFilename(filename)):
return def undo_temp_name(self, filename):
timestr = last_modified_hdr if filename.endswith(u'.part'):
if timestr is None: return filename[:-len(u'.part')]
return return filename
filetime = timeconvert(timestr)
if filetime is None: def try_rename(self, old_filename, new_filename):
return filetime try:
try: if old_filename == new_filename:
os.utime(filename, (time.time(), filetime)) return
except: os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
pass except (IOError, OSError) as err:
return filetime self.trouble(u'ERROR: unable to rename file')
def report_writedescription(self, descfn): def try_utime(self, filename, last_modified_hdr):
""" Report that the description file is being written """ """Try to set the last-modified time of the given file."""
self.to_screen(u'[info] Writing video description to: ' + descfn) if last_modified_hdr is None:
return
def report_writesubtitles(self, srtfn): if not os.path.isfile(encodeFilename(filename)):
""" Report that the subtitles file is being written """ return
self.to_screen(u'[info] Writing video subtitles to: ' + srtfn) timestr = last_modified_hdr
if timestr is None:
def report_writeinfojson(self, infofn): return
""" Report that the metadata file has been written """ filetime = timeconvert(timestr)
self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn) if filetime is None:
return filetime
def report_destination(self, filename): try:
"""Report destination filename.""" os.utime(filename, (time.time(), filetime))
self.to_screen(u'[download] Destination: ' + filename) except:
pass
def report_progress(self, percent_str, data_len_str, speed_str, eta_str): return filetime
"""Report download progress."""
if self.params.get('noprogress', False): def report_writedescription(self, descfn):
return """ Report that the description file is being written """
self.to_screen(u'\r[download] %s of %s at %s ETA %s' % self.to_screen(u'[info] Writing video description to: ' + descfn)
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' % def report_writesubtitles(self, srtfn):
(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip())) """ Report that the subtitles file is being written """
self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte.""" def report_writeinfojson(self, infofn):
self.to_screen(u'[download] Resuming download at byte %s' % resume_len) """ Report that the metadata file has been written """
self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx""" def report_destination(self, filename):
self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) """Report destination filename."""
self.to_screen(u'[download] Destination: ' + filename)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded.""" def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
try: """Report download progress."""
self.to_screen(u'[download] %s has already been downloaded' % file_name) if self.params.get('noprogress', False):
except (UnicodeEncodeError), err: return
self.to_screen(u'[download] The file has already been downloaded') self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
def report_unable_to_resume(self): self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
"""Report it was impossible to resume download.""" (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
self.to_screen(u'[download] Unable to resume')
def report_resuming_byte(self, resume_len):
def report_finish(self): """Report attempt to resume at given byte."""
"""Report download finished.""" self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
if self.params.get('noprogress', False):
self.to_screen(u'[download] Download completed') def report_retry(self, count, retries):
else: """Report retry in case of HTTP error 5xx"""
self.to_screen(u'') self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def increment_downloads(self): def report_file_already_downloaded(self, file_name):
"""Increment the ordinal that assigns a number to each file.""" """Report file has already been fully downloaded."""
self._num_downloads += 1 try:
self.to_screen(u'[download] %s has already been downloaded' % file_name)
def prepare_filename(self, info_dict): except (UnicodeEncodeError) as err:
"""Generate the output filename.""" self.to_screen(u'[download] The file has already been downloaded')
try:
template_dict = dict(info_dict) def report_unable_to_resume(self):
template_dict['epoch'] = unicode(long(time.time())) """Report it was impossible to resume download."""
template_dict['autonumber'] = unicode('%05d' % self._num_downloads) self.to_screen(u'[download] Unable to resume')
filename = self.params['outtmpl'] % template_dict
return filename def report_finish(self):
except (ValueError, KeyError), err: """Report download finished."""
self.trouble(u'ERROR: invalid system charset or erroneous output template') if self.params.get('noprogress', False):
return None self.to_screen(u'[download] Download completed')
else:
def _match_entry(self, info_dict): self.to_screen(u'')
""" Returns None iff the file should be downloaded """
def increment_downloads(self):
title = info_dict['title'] """Increment the ordinal that assigns a number to each file."""
matchtitle = self.params.get('matchtitle', False) self._num_downloads += 1
if matchtitle:
matchtitle = matchtitle.decode('utf8') def prepare_filename(self, info_dict):
if not re.search(matchtitle, title, re.IGNORECASE): """Generate the output filename."""
return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"' try:
rejecttitle = self.params.get('rejecttitle', False) template_dict = dict(info_dict)
if rejecttitle:
rejecttitle = rejecttitle.decode('utf8') template_dict['epoch'] = int(time.time())
if re.search(rejecttitle, title, re.IGNORECASE): template_dict['autonumber'] = u'%05d' % self._num_downloads
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
return None sanitize = lambda k,v: sanitize_filename(
u'NA' if v is None else compat_str(v),
def process_info(self, info_dict): restricted=self.params.get('restrictfilenames'),
"""Process a single dictionary returned by an InfoExtractor.""" is_id=(k==u'id'))
template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items())
info_dict['stitle'] = sanitize_filename(info_dict['title'])
filename = self.params['outtmpl'] % template_dict
reason = self._match_entry(info_dict) return filename
if reason is not None: except (ValueError, KeyError) as err:
self.to_screen(u'[download] ' + reason) self.trouble(u'ERROR: invalid system charset or erroneous output template')
return return None
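# Template sketch (illustrative): outtmpl expands via plain %-formatting over the
# sanitized fields plus the synthetic 'epoch' and 'autonumber' keys, e.g.
#   u'%(autonumber)s-%(id)s.%(ext)s' % {'autonumber': u'00001',
#       'id': u'BaW_jenozKc', 'ext': u'mp4'} == u'00001-BaW_jenozKc.mp4'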
max_downloads = self.params.get('max_downloads') def _match_entry(self, info_dict):
if max_downloads is not None: """ Returns None iff the file should be downloaded """
if self._num_downloads > int(max_downloads):
raise MaxDownloadsReached() title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
filename = self.prepare_filename(info_dict) if matchtitle:
matchtitle = matchtitle.decode('utf8')
# Forced printings if not re.search(matchtitle, title, re.IGNORECASE):
if self.params.get('forcetitle', False): return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace') rejecttitle = self.params.get('rejecttitle', False)
if self.params.get('forceurl', False): if rejecttitle:
print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace') rejecttitle = rejecttitle.decode('utf8')
if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict: if re.search(rejecttitle, title, re.IGNORECASE):
print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace') return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
if self.params.get('forcedescription', False) and 'description' in info_dict: return None
print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
if self.params.get('forcefilename', False) and filename is not None: def process_info(self, info_dict):
print filename.encode(preferredencoding(), 'xmlcharrefreplace') """Process a single dictionary returned by an InfoExtractor."""
if self.params.get('forceformat', False):
print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace') # Keep for backwards compatibility
info_dict['stitle'] = info_dict['title']
# Do nothing else if in simulate mode
if self.params.get('simulate', False): if not 'format' in info_dict:
return info_dict['format'] = info_dict['ext']
if filename is None: reason = self._match_entry(info_dict)
return if reason is not None:
self.to_screen(u'[download] ' + reason)
try: return
dn = os.path.dirname(encodeFilename(filename))
if dn != '' and not os.path.exists(dn): # dn is already encoded max_downloads = self.params.get('max_downloads')
os.makedirs(dn) if max_downloads is not None:
except (OSError, IOError), err: if self._num_downloads > int(max_downloads):
self.trouble(u'ERROR: unable to create directory ' + unicode(err)) raise MaxDownloadsReached()
return
filename = self.prepare_filename(info_dict)
if self.params.get('writedescription', False):
try: # Forced printings
descfn = filename + u'.description' if self.params.get('forcetitle', False):
self.report_writedescription(descfn) compat_print(info_dict['title'])
descfile = open(encodeFilename(descfn), 'wb') if self.params.get('forceurl', False):
try: compat_print(info_dict['url'])
descfile.write(info_dict['description'].encode('utf-8')) if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
finally: compat_print(info_dict['thumbnail'])
descfile.close() if self.params.get('forcedescription', False) and 'description' in info_dict:
except (OSError, IOError): compat_print(info_dict['description'])
self.trouble(u'ERROR: Cannot write description file ' + descfn) if self.params.get('forcefilename', False) and filename is not None:
return compat_print(filename)
if self.params.get('forceformat', False):
if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: compat_print(info_dict['format'])
# subtitles download errors are already managed as troubles in relevant IE
# that way it will silently go on when used with unsupporting IE # Do nothing else if in simulate mode
try: if self.params.get('simulate', False):
srtfn = filename.rsplit('.', 1)[0] + u'.srt' return
self.report_writesubtitles(srtfn)
srtfile = open(encodeFilename(srtfn), 'wb') if filename is None:
try: return
srtfile.write(info_dict['subtitles'].encode('utf-8'))
finally: try:
srtfile.close() dn = os.path.dirname(encodeFilename(filename))
except (OSError, IOError): if dn != '' and not os.path.exists(dn): # dn is already encoded
self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) os.makedirs(dn)
return except (OSError, IOError) as err:
self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
if self.params.get('writeinfojson', False): return
infofn = filename + u'.info.json'
self.report_writeinfojson(infofn) if self.params.get('writedescription', False):
try: try:
json.dump descfn = filename + u'.description'
except (NameError,AttributeError): self.report_writedescription(descfn)
self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.') with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
return descfile.write(info_dict['description'])
try: except (OSError, IOError):
infof = open(encodeFilename(infofn), 'wb') self.trouble(u'ERROR: Cannot write description file ' + descfn)
try: return
json_info_dict = dict((k,v) for k,v in info_dict.iteritems() if not k in ('urlhandle',))
json.dump(json_info_dict, infof) if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
finally: # subtitles download errors are already managed as troubles in relevant IE
infof.close() # that way it will silently go on when used with unsupporting IE
except (OSError, IOError): try:
self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn) srtfn = filename.rsplit('.', 1)[0] + u'.srt'
return self.report_writesubtitles(srtfn)
with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
if not self.params.get('skip_download', False): srtfile.write(info_dict['subtitles'])
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)): except (OSError, IOError):
success = True self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
else: return
try:
success = self._do_download(filename, info_dict) if self.params.get('writeinfojson', False):
except (OSError, IOError), err: infofn = filename + u'.info.json'
raise UnavailableVideoError self.report_writeinfojson(infofn)
except (urllib2.URLError, httplib.HTTPException, socket.error), err: try:
self.trouble(u'ERROR: unable to download video data: %s' % str(err)) json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
return write_json_file(json_info_dict, encodeFilename(infofn))
except (ContentTooShortError, ), err: except (OSError, IOError):
self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
return return
if success: if not self.params.get('skip_download', False):
try: if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
self.post_process(filename, info_dict) success = True
except (PostProcessingError), err: else:
self.trouble(u'ERROR: postprocessing: %s' % str(err)) try:
return success = self._do_download(filename, info_dict)
except (OSError, IOError) as err:
def download(self, url_list): raise UnavailableVideoError()
"""Download a given list of URLs.""" except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if len(url_list) > 1 and self.fixed_template(): self.trouble(u'ERROR: unable to download video data: %s' % str(err))
raise SameFileError(self.params['outtmpl']) return
except (ContentTooShortError, ) as err:
for url in url_list: self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
suitable_found = False return
for ie in self._ies:
# Go to next InfoExtractor if not suitable if success:
if not ie.suitable(url): try:
continue self.post_process(filename, info_dict)
except (PostProcessingError) as err:
# Suitable InfoExtractor found self.trouble(u'ERROR: postprocessing: %s' % str(err))
suitable_found = True return
# Extract information from URL and process it def download(self, url_list):
videos = ie.extract(url) """Download a given list of URLs."""
for video in videos or []: if len(url_list) > 1 and self.fixed_template():
video['extractor'] = ie.IE_NAME raise SameFileError(self.params['outtmpl'])
try:
self.increment_downloads() for url in url_list:
self.process_info(video) suitable_found = False
except UnavailableVideoError: for ie in self._ies:
self.trouble(u'\nERROR: unable to download video') # Go to next InfoExtractor if not suitable
if not ie.suitable(url):
# Suitable InfoExtractor had been found; go to next URL continue
break
# Warn if the _WORKING attribute is False
if not suitable_found: if not ie.working():
self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url) self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
u'and will probably not work. If you want to go on, use the -i option.')
return self._download_retcode
# Suitable InfoExtractor found
def post_process(self, filename, ie_info): suitable_found = True
"""Run the postprocessing chain on the given file."""
info = dict(ie_info) # Extract information from URL and process it
info['filepath'] = filename try:
for pp in self._pps: videos = ie.extract(url)
info = pp.run(info) except ExtractorError as de: # An error we somewhat expected
if info is None: self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
break break
except Exception as e:
def _download_with_rtmpdump(self, filename, url, player_url): if self.params.get('ignoreerrors', False):
self.report_destination(filename) self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
tmpfilename = self.temp_name(filename) break
else:
# Check for rtmpdump first raise
try:
subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT) if len(videos or []) > 1 and self.fixed_template():
except (OSError, IOError): raise SameFileError(self.params['outtmpl'])
self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
return False for video in videos or []:
video['extractor'] = ie.IE_NAME
# Download using rtmpdump. rtmpdump returns exit code 2 when try:
# the connection was interrupted and resuming appears to be self.increment_downloads()
# possible. This is part of rtmpdump's normal usage, AFAIK. self.process_info(video)
basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename] except UnavailableVideoError:
args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)] self.trouble(u'\nERROR: unable to download video')
if self.params.get('verbose', False):
try: # Suitable InfoExtractor had been found; go to next URL
import pipes break
shell_quote = lambda args: ' '.join(map(pipes.quote, args))
except ImportError: if not suitable_found:
shell_quote = repr self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
retval = subprocess.call(args) return self._download_retcode
while retval == 2 or retval == 1:
prevsize = os.path.getsize(encodeFilename(tmpfilename)) def post_process(self, filename, ie_info):
self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True) """Run the postprocessing chain on the given file."""
time.sleep(5.0) # This seems to be needed info = dict(ie_info)
retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1]) info['filepath'] = filename
cursize = os.path.getsize(encodeFilename(tmpfilename)) for pp in self._pps:
if prevsize == cursize and retval == 1: info = pp.run(info)
break if info is None:
# Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those if prevsize == cursize and retval == 2 and cursize > 1024:
if prevsize == cursize and retval == 2 and cursize > 1024:
self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.') def _download_with_rtmpdump(self, filename, url, player_url, page_url):
retval = 0 self.report_destination(filename)
break tmpfilename = self.temp_name(filename)
if retval == 0:
self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename))) # Check for rtmpdump first
self.try_rename(tmpfilename, filename) try:
return True subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
else: except (OSError, IOError):
self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval) self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
return False return False
def _do_download(self, filename, info_dict): # Download using rtmpdump. rtmpdump returns exit code 2 when
url = info_dict['url'] # the connection was interrupted and resuming appears to be
player_url = info_dict.get('player_url', None) # possible. This is part of rtmpdump's normal usage, AFAIK.
basic_args = ['rtmpdump', '-q', '-r', url, '-o', tmpfilename]
# Check file already present if player_url is not None:
if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False): basic_args += ['-W', player_url]
self.report_file_already_downloaded(filename) if page_url is not None:
return True basic_args += ['--pageUrl', page_url]
args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
# Attempt to download using rtmpdump if self.params.get('verbose', False):
if url.startswith('rtmp'): try:
return self._download_with_rtmpdump(filename, url, player_url) import pipes
shell_quote = lambda args: ' '.join(map(pipes.quote, args))
tmpfilename = self.temp_name(filename) except ImportError:
stream = None shell_quote = repr
self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
# Do not include the Accept-Encoding header retval = subprocess.call(args)
headers = {'Youtubedl-no-compression': 'True'} while retval == 2 or retval == 1:
basic_request = urllib2.Request(url, None, headers) prevsize = os.path.getsize(encodeFilename(tmpfilename))
request = urllib2.Request(url, None, headers) self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
time.sleep(5.0) # This seems to be needed
# Establish possible resume length retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
if os.path.isfile(encodeFilename(tmpfilename)): cursize = os.path.getsize(encodeFilename(tmpfilename))
resume_len = os.path.getsize(encodeFilename(tmpfilename)) if prevsize == cursize and retval == 1:
else: break
resume_len = 0 # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
if prevsize == cursize and retval == 2 and cursize > 1024:
open_mode = 'wb' self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
if resume_len != 0: retval = 0
if self.params.get('continuedl', False): break
self.report_resuming_byte(resume_len) if retval == 0:
request.add_header('Range','bytes=%d-' % resume_len) self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
open_mode = 'ab' self.try_rename(tmpfilename, filename)
else: return True
resume_len = 0 else:
self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
count = 0 return False
retries = self.params.get('retries', 0)
while count <= retries: def _do_download(self, filename, info_dict):
# Establish connection url = info_dict['url']
try:
if count == 0 and 'urlhandle' in info_dict: # Check file already present
data = info_dict['urlhandle'] if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
data = urllib2.urlopen(request) self.report_file_already_downloaded(filename)
break return True
except (urllib2.HTTPError, ), err:
if (err.code < 500 or err.code >= 600) and err.code != 416: # Attempt to download using rtmpdump
# Unexpected HTTP error if url.startswith('rtmp'):
raise return self._download_with_rtmpdump(filename, url,
elif err.code == 416: info_dict.get('player_url', None),
# Unable to resume (requested range not satisfiable) info_dict.get('page_url', None))
try:
# Open the connection again without the range header tmpfilename = self.temp_name(filename)
data = urllib2.urlopen(basic_request) stream = None
content_length = data.info()['Content-Length']
except (urllib2.HTTPError, ), err: # Do not include the Accept-Encoding header
if err.code < 500 or err.code >= 600: headers = {'Youtubedl-no-compression': 'True'}
raise basic_request = compat_urllib_request.Request(url, None, headers)
else: request = compat_urllib_request.Request(url, None, headers)
# Examine the reported length
if (content_length is not None and if self.params.get('test', False):
(resume_len - 100 < long(content_length) < resume_len + 100)): request.add_header('Range','bytes=0-10240')
# The file had already been fully downloaded.
# Explanation of the above condition: in issue #175 it was revealed that if os.path.isfile(encodeFilename(tmpfilename)):
# YouTube sometimes adds or removes a few bytes from the end of the file, if os.path.isfile(encodeFilename(tmpfilename)):
# changing the file size slightly and causing problems for some users. So resume_len = os.path.getsize(encodeFilename(tmpfilename))
# I decided to implement a suggested change and consider the file else:
# completely downloaded if the file size differs less than 100 bytes from resume_len = 0
# the one in the hard drive.
self.report_file_already_downloaded(filename) open_mode = 'wb'
self.try_rename(tmpfilename, filename) if resume_len != 0:
return True if self.params.get('continuedl', False):
else: self.report_resuming_byte(resume_len)
# The length does not match, we start the download over request.add_header('Range','bytes=%d-' % resume_len)
self.report_unable_to_resume() open_mode = 'ab'
open_mode = 'wb' else:
break resume_len = 0
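# Worked example (illustrative) of the +/-100 byte rule above: a local .part file
# of resume_len=5000000 bytes against a reported Content-Length of 4999950 falls
# inside (resume_len - 100, resume_len + 100), so the download counts as complete.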
# Retry
count += 1 count = 0
if count <= retries: retries = self.params.get('retries', 0)
self.report_retry(count, retries) while count <= retries:
# Establish connection
if count > retries: try:
self.trouble(u'ERROR: giving up after %s retries' % retries) if count == 0 and 'urlhandle' in info_dict:
return False data = info_dict['urlhandle']
data = compat_urllib_request.urlopen(request)
data_len = data.info().get('Content-length', None) break
if data_len is not None: except (compat_urllib_error.HTTPError, ) as err:
data_len = long(data_len) + resume_len if (err.code < 500 or err.code >= 600) and err.code != 416:
data_len_str = self.format_bytes(data_len) # Unexpected HTTP error
byte_counter = 0 + resume_len raise
block_size = 1024 elif err.code == 416:
start = time.time() # Unable to resume (requested range not satisfiable)
while True: try:
# Download and write # Open the connection again without the range header
before = time.time() data = compat_urllib_request.urlopen(basic_request)
data_block = data.read(block_size) content_length = data.info()['Content-Length']
after = time.time() except (compat_urllib_error.HTTPError, ) as err:
if len(data_block) == 0: if err.code < 500 or err.code >= 600:
break raise
byte_counter += len(data_block) else:
# Examine the reported length
# Open file just in time if (content_length is not None and
if stream is None: (resume_len - 100 < int(content_length) < resume_len + 100)):
try: # The file had already been fully downloaded.
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) # Explanation of the above condition: in issue #175 it was revealed that
assert stream is not None # YouTube sometimes adds or removes a few bytes from the end of the file,
filename = self.undo_temp_name(tmpfilename) # changing the file size slightly and causing problems for some users. So
self.report_destination(filename) # I decided to implement a suggested change and consider the file
except (OSError, IOError), err: # completely downloaded if the file size differs less than 100 bytes from
self.trouble(u'ERROR: unable to open for writing: %s' % str(err)) # the one in the hard drive.
return False self.report_file_already_downloaded(filename)
try: self.try_rename(tmpfilename, filename)
stream.write(data_block) return True
except (IOError, OSError), err: else:
self.trouble(u'\nERROR: unable to write data: %s' % str(err)) # The length does not match, we start the download over
return False self.report_unable_to_resume()
block_size = self.best_block_size(after - before, len(data_block)) open_mode = 'wb'
break
# Progress message # Retry
speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len) count += 1
if data_len is None: if count <= retries:
self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA') self.report_retry(count, retries)
else:
percent_str = self.calc_percent(byte_counter, data_len) if count > retries:
eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len) self.trouble(u'ERROR: giving up after %s retries' % retries)
self.report_progress(percent_str, data_len_str, speed_str, eta_str) return False
# Apply rate limit data_len = data.info().get('Content-length', None)
self.slow_down(start, byte_counter - resume_len) if data_len is not None:
data_len = int(data_len) + resume_len
if stream is None: data_len_str = self.format_bytes(data_len)
self.trouble(u'\nERROR: Did not get any data blocks') byte_counter = 0 + resume_len
return False block_size = self.params.get('buffersize', 1024)
stream.close() start = time.time()
self.report_finish() while True:
if data_len is not None and byte_counter != data_len: # Download and write
raise ContentTooShortError(byte_counter, long(data_len)) before = time.time()
self.try_rename(tmpfilename, filename) data_block = data.read(block_size)
after = time.time()
# Update file modification time if len(data_block) == 0:
if self.params.get('updatetime', True): break
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None)) byte_counter += len(data_block)
return True # Open file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.trouble(u'\nERROR: unable to write data: %s' % str(err))
return False
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
# Progress message
speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
if data_len is None:
self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
else:
percent_str = self.calc_percent(byte_counter, data_len)
eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self.report_progress(percent_str, data_len_str, speed_str, eta_str)
# Apply rate limit
self.slow_down(start, byte_counter - resume_len)
if stream is None:
self.trouble(u'\nERROR: Did not get any data blocks')
return False
stream.close()
self.report_finish()
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
return True

6366
youtube_dl/InfoExtractors.py Normal file → Executable file

@ -1,3696 +1,3782 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import datetime import datetime
import HTMLParser
import httplib
import netrc import netrc
import os import os
import re import re
import socket import socket
import time import time
import urllib
import urllib2
import email.utils import email.utils
import xml.etree.ElementTree import xml.etree.ElementTree
import random import random
import math import math
from urlparse import parse_qs, urlparse
try: from .utils import *
import cStringIO as StringIO
except ImportError:
import StringIO
from utils import *
class InfoExtractor(object): class InfoExtractor(object):
"""Information Extractor class. """Information Extractor class.
Information extractors are the classes that, given a URL, extract Information extractors are the classes that, given a URL, extract
information from the video (or videos) the URL refers to. This information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title and simplified information includes the real video URL, the video title, author and
title, author and others. The information is stored in a dictionary others. The information is stored in a dictionary which is then
which is then passed to the FileDownloader. The FileDownloader passed to the FileDownloader. The FileDownloader processes this
processes this information possibly downloading the video to the file information possibly downloading the video to the file system, among
system, among other possible outcomes. The dictionaries must include other possible outcomes.
the following fields:
id: Video identifier. The dictionaries must include the following fields:
url: Final video URL.
uploader: Nickname of the video uploader.
title: Literal title.
ext: Video filename extension.
format: Video format.
player_url: SWF Player URL (may be None).
The following fields are optional. Their primary purpose is to allow id: Video identifier.
youtube-dl to serve as the backend for a video search function, such url: Final video URL.
as the one in youtube2mp3. They are only used when their respective title: Video title, unescaped.
forced printing functions are called: ext: Video filename extension.
uploader: Full name of the video uploader.
upload_date: Video upload date (YYYYMMDD).
thumbnail: Full URL to a video thumbnail image. The following fields are optional:
description: One-line video description.
Subclasses of this one should re-define the _real_initialize() and format: The video format, defaults to ext (used for --get-format)
_real_extract() methods and define a _VALID_URL regexp. thumbnail: Full URL to a video thumbnail image.
Probably, they should also be added to the list of extractors. description: One-line video description.
""" uploader_id: Nickname or id of the video uploader.
player_url: SWF Player URL (used for rtmpdump).
subtitles: The .srt file contents.
urlhandle: [internal] The urlHandle to be used to download the file,
like returned by urllib.request.urlopen
_ready = False The fields should all be Unicode strings.
_downloader = None
def __init__(self, downloader=None): Subclasses of this one should re-define the _real_initialize() and
"""Constructor. Receives an optional downloader.""" _real_extract() methods and define a _VALID_URL regexp.
self._ready = False Probably, they should also be added to the list of extractors.
self.set_downloader(downloader)
def suitable(self, url): _real_extract() must return a *list* of information dictionaries as
"""Receives a URL and returns True if suitable for this IE.""" described above.
return re.match(self._VALID_URL, url) is not None
def initialize(self): Finally, the _WORKING attribute should be set to False for broken IEs
"""Initializes an instance (authentication, etc).""" in order to warn the users and skip the tests.
if not self._ready: """
self._real_initialize()
self._ready = True
def extract(self, url): _ready = False
"""Extracts URL information and returns it in list of dicts.""" _downloader = None
self.initialize() _WORKING = True
return self._real_extract(url)
def set_downloader(self, downloader): def __init__(self, downloader=None):
"""Sets the downloader for this IE.""" """Constructor. Receives an optional downloader."""
self._downloader = downloader self._ready = False
self.set_downloader(downloader)
def _real_initialize(self): def suitable(self, url):
"""Real initialization process. Redefine in subclasses.""" """Receives a URL and returns True if suitable for this IE."""
pass return re.match(self._VALID_URL, url) is not None
def _real_extract(self, url): def working(self):
"""Real extraction process. Redefine in subclasses.""" """Getter method for _WORKING."""
pass return self._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
self.initialize()
return self._real_extract(url)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
if note is None:
note = u'Downloading video webpage'
self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
try:
urlh = compat_urllib_request.urlopen(url_or_request)
webpage_bytes = urlh.read()
return webpage_bytes.decode('utf-8', 'replace')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is None:
errnote = u'Unable to download webpage'
raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
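# Minimal subclass sketch (illustrative, not part of this diff): per the contract
# above, an extractor defines _VALID_URL and a _real_extract() returning a *list*
# of info dicts carrying the required fields:
#   class ExampleIE(InfoExtractor):
#       _VALID_URL = r'https?://example\.com/v/(?P<id>\d+)'
#       def _real_extract(self, url):
#           video_id = re.match(self._VALID_URL, url).group('id')
#           return [{'id': video_id, 'ext': u'mp4', 'title': u'Example video',
#                    'uploader': u'example uploader', 'upload_date': u'20130105',
#                    'url': u'http://example.com/%s.mp4' % video_id}]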

class YoutubeIE(InfoExtractor):
    """Information extractor for youtube.com."""

    _VALID_URL = r"""^
                     (
                         (?:https?://)?                               # http(s):// (optional)
                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                            tube\.majestyc\.net/)                     # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                  # handle anchor (#/) redirect urls
                         (?!view_play_list|my_playlists|artist|playlist)  # ignore playlist URLs
                         (?:                                          # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                       # v/ or embed/ or e/
                             |(?:                                     # or the v= param in all its forms
                                 (?:watch(?:_popup)?(?:\.php)?)?      # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                          # the params delimiter ? or # or #!
                                 (?:.*?&)?                            # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         )?                                           # optional -> youtube.com/xxxx is OK
                     )?                                               # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]+)                                 # here it is! the YouTube video ID
                     (?(1).+)?                                        # if we found the ID, everything can follow
                     $"""
    _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    _NETRC_MACHINE = 'youtube'
    # Listed in order of quality
    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
    _video_extensions = {
        '13': '3gp',
        '17': 'mp4',
        '18': 'mp4',
        '22': 'mp4',
        '37': 'mp4',
        '38': 'video',  # You actually don't know if this will be MOV, AVI or whatever
        '43': 'webm',
        '44': 'webm',
        '45': 'webm',
        '46': 'webm',
    }
    _video_dimensions = {
        '5': '240x400',
        '6': '???',
        '13': '???',
        '17': '144x176',
        '18': '360x640',
        '22': '720x1280',
        '34': '360x640',
        '35': '480x854',
        '37': '1080x1920',
        '38': '3072x4096',
        '43': '360x640',
        '44': '480x854',
        '45': '720x1280',
        '46': '1080x1920',
    }
    IE_NAME = u'youtube'

    def suitable(self, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(self._VALID_URL, url, re.VERBOSE) is not None

    def report_lang(self):
        """Report attempt to set language."""
        self._downloader.to_screen(u'[youtube] Setting language')

    def report_login(self):
        """Report attempt to log in."""
        self._downloader.to_screen(u'[youtube] Logging in')

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self._downloader.to_screen(u'[youtube] Confirming age')

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)

    def report_video_subtitles_download(self, video_id):
        """Report attempt to download video subtitles."""
        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report extracted video URL."""
        self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self._downloader.to_screen(u'[youtube] RTMP download detected')

    def _closed_captions_xml_to_srt(self, xml_string):
        srt = ''
        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
        # TODO parse xml instead of regex
        for n, (start, dur_tag, dur, caption) in enumerate(texts):
            if not dur: dur = '4'
            start = float(start)
            end = start + float(dur)
            start = "%02i:%02i:%02i,%03i" % (start/(60*60), start/60%60, start%60, start%1*1000)
            end = "%02i:%02i:%02i,%03i" % (end/(60*60), end/60%60, end%60, end%1*1000)
            caption = unescapeHTML(caption)
            caption = unescapeHTML(caption)  # double cycle, intentional
            srt += str(n+1) + '\n'
            srt += start + ' --> ' + end + '\n'
            srt += caption + '\n\n'
        return srt
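
    # Worked example (illustrative; the XML snippet is made up): for
    #   <text start="0.5" dur="2.0">Hello &amp;amp; world</text>
    # the loop above computes start=0.5 and end=2.5, and the timestamp
    # format "%02i:%02i:%02i,%03i" renders them as
    #   00:00:00,500 --> 00:00:02,500
    # while the double unescapeHTML turns '&amp;amp;' into '&'. A <text>
    # element without a dur attribute gets the 4-second default.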

    def _extract_subtitles(self, video_id):
        self.report_video_subtitles_download(video_id)
        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
        try:
            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
        if not srt_lang_list:
            return (u'WARNING: video has no closed captions', None)
        if self._downloader.params.get('subtitleslang', False):
            srt_lang = self._downloader.params.get('subtitleslang')
        elif 'en' in srt_lang_list:
            srt_lang = 'en'
        else:
            srt_lang = list(srt_lang_list.keys())[0]
        if not srt_lang in srt_lang_list:
            return (u'WARNING: no closed captions found in the specified language', None)
        request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
        try:
            srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
        if not srt_xml:
            return (u'WARNING: unable to download video subtitles', None)
        return (None, self._closed_captions_xml_to_srt(srt_xml))
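
    # Note: _extract_subtitles() deliberately returns a (warning, srt_data)
    # pair instead of raising, so the caller can downgrade subtitle failures
    # to warnings; exactly one element of the pair is None.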

    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))

    def _real_initialize(self):
        if self._downloader is None:
            return

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
                return

        # Set language
        request = compat_urllib_request.Request(self._LANG_URL)
        try:
            self.report_lang()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
            return

        # No authentication to be performed
        if username is None:
            return

        # Log in
        login_form = {
            'current_form': 'loginForm',
            'next': '/',
            'action_login': 'Log In',
            'username': username,
            'password': password,
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
                return
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
            return

        # Confirm age
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
        try:
            self.report_age_confirmation()
            age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
            return

    def _extract_id(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(2)
        return video_id

    def _real_extract(self, url):
        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self._extract_id(url)

        # Get video webpage
        self.report_video_webpage_download(video_id)
        url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        request = compat_urllib_request.Request(url)
        try:
            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
            return

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                              % (video_id, el_type))
            request = compat_urllib_request.Request(video_info_url)
            try:
                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
                video_info = compat_parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
                return
        if 'token' not in video_info:
            if 'reason' in video_info:
                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
            else:
                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
            return

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            self._downloader.trouble(u'ERROR: "rental" videos not supported')
            return

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract uploader name')
            return
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')

        # title
        if 'title' not in video_info:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])

        # thumbnail image
        if 'thumbnail_url' not in video_info:
            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
            video_thumbnail = ''
        else:  # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = None
        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
            for expression in format_expressions:
                try:
                    upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
                except:
                    pass

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            video_description = ''

        # closed captions
        video_subtitles = None
        if self._downloader.params.get('writesubtitles', False):
            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
            if srt_error:
                self._downloader.trouble(srt_error)

        if 'length_seconds' not in video_info:
            self._downloader.trouble(u'WARNING: unable to extract video duration')
            video_duration = ''
        else:
            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])

        # token
        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])

        # Decide which formats to download
        req_format = self._downloader.params.get('format', None)

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]
        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
            url_data = [compat_parse_qs(uds) for uds in url_data_strs]
            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)

            format_limit = self._downloader.params.get('format_limit', None)
            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
            if format_limit is not None and format_limit in available_formats:
                format_list = available_formats[available_formats.index(format_limit):]
            else:
                format_list = available_formats
            existing_formats = [x for x in format_list if x in url_map]
            if len(existing_formats) == 0:
                self._downloader.trouble(u'ERROR: no known formats available for video')
                return
            if self._downloader.params.get('listformats', None):
                self._print_formats(existing_formats)
                return
            if req_format is None or req_format == 'best':
                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])]  # Best quality
            elif req_format == 'worst':
                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])]  # worst quality
            elif req_format in ('-1', 'all'):
                video_url_list = [(f, url_map[f]) for f in existing_formats]  # All formats
            else:
                # Specific formats. We pick the first in a slash-delimited sequence.
                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
                req_formats = req_format.split('/')
                video_url_list = None
                for rf in req_formats:
                    if rf in url_map:
                        video_url_list = [(rf, url_map[rf])]
                        break
                if video_url_list is None:
                    self._downloader.trouble(u'ERROR: requested format not available')
                    return
        else:
            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
            return

        results = []
        for format_param, video_real_url in video_url_list:
            # Extension
            video_extension = self._video_extensions.get(format_param, 'flv')

            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
                                              self._video_dimensions.get(format_param, '???'))

            results.append({
                'id': video_id,
                'url': video_real_url,
                'uploader': video_uploader,
                'uploader_id': video_uploader_id,
                'upload_date': upload_date,
                'title': video_title,
                'ext': video_extension,
                'format': video_format,
                'thumbnail': video_thumbnail,
                'description': video_description,
                'player_url': player_url,
                'subtitles': video_subtitles,
                'duration': video_duration
            })
        return results
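
# Illustrative sketch (hypothetical helper, not used by the module): the
# format-selection rules implemented in YoutubeIE._real_extract above,
# restated standalone. 'best' takes the first available format in quality
# order, 'worst' the last, '-1'/'all' every one, and a slash-delimited
# request like '35/34/18' the first of those that is actually available.
def _select_formats_sketch(url_map, req_format, quality_order):
    existing = [f for f in quality_order if f in url_map]
    if not existing:
        return None  # no known formats at all
    if req_format is None or req_format == 'best':
        return [existing[0]]
    if req_format == 'worst':
        return [existing[-1]]
    if req_format in ('-1', 'all'):
        return existing
    for rf in req_format.split('/'):
        if rf in url_map:
            return [rf]  # first requested format that exists wins
    return None  # requested format not available

# e.g. _select_formats_sketch({'34': 'url34', '18': 'url18'}, '35/34/18',
#                             ['38', '37', '22', '35', '34', '18'])
# picks ['34'], just like the slash-delimited branch above would.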

class MetacafeIE(InfoExtractor):
    """Information Extractor for metacafe.com."""

    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
    IE_NAME = u'metacafe'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_disclaimer(self):
        """Report disclaimer retrieval."""
        self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self._downloader.to_screen(u'[metacafe] Confirming age')

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)

    def _real_initialize(self):
        # Retrieve disclaimer
        request = compat_urllib_request.Request(self._DISCLAIMER)
        try:
            self.report_disclaimer()
            disclaimer = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
            return

        # Confirm age
        disclaimer_form = {
            'filters': '0',
            'submit': "Continue - I'm over 18",
        }
        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
        try:
            self.report_age_confirmation()
            disclaimer = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
            return

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group(1)

        # Check if video comes from YouTube
        mobj2 = re.match(r'^yt-(.*)$', video_id)
        if mobj2 is not None:
            self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
            return

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
        if mobj is not None:
            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
            video_extension = mediaURL[-3:]

            # Extract gdaKey if available
            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
            if mobj is None:
                video_url = mediaURL
            else:
                gdaKey = mobj.group(1)
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
        else:
            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            vardict = compat_parse_qs(mobj.group(1))
            if 'mediaData' not in vardict:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract media URL')
                return
            mediaURL = mobj.group(1).replace('\\/', '/')
            video_extension = mediaURL[-3:]
            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))

        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')

        mobj = re.search(r'submitter=(.*?);', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
            return
        video_uploader = mobj.group(1)

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader.decode('utf-8'),
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]

class DailymotionIE(InfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
    IE_NAME = u'dailymotion'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group(1).split('_')[0].split('?')[0]

        video_extension = 'mp4'

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        flashvars = compat_urllib_parse.unquote(mobj.group(1))

        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
            if key in flashvars:
                max_quality = key
                self._downloader.to_screen(u'[dailymotion] Using %s' % key)
                break
        else:
            self._downloader.trouble(u'ERROR: unable to extract video URL')
            return

        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video URL')
            return

        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')

        # TODO: support choosing qualities

        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = unescapeHTML(mobj.group('title'))

        video_uploader = None
        mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
        if mobj is None:
            # looking for the official user
            mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
            if mobj_official is None:
                self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
            else:
                video_uploader = mobj_official.group(1)
        else:
            video_uploader = mobj.group(1)

        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'title': video_title,
            'ext': video_extension,
        }]
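
# Illustrative note on the quality loop above: Dailymotion's flashvars can
# contain several quality keys, and the for/else picks the first (highest)
# one present, hitting the else branch only if the loop never breaks. A
# standalone sketch of the same idiom (the flashvars string is made up):
def _pick_dailymotion_quality_sketch(flashvars):
    for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
        if key in flashvars:
            return key  # first match in the list is the best available
    return None

# _pick_dailymotion_quality_sketch('{"sdURL":"...","ldURL":"..."}') == 'sdURL'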

class PhotobucketIE(InfoExtractor):
    """Information extractor for photobucket.com."""

    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
    IE_NAME = u'photobucket'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        video_id = mobj.group(1)

        video_extension = 'flv'

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url)
        try:
            self.report_download_webpage(video_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        # Extract URL, uploader, and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract media URL')
            return
        mediaURL = compat_urllib_parse.unquote(mobj.group(1))

        video_url = mediaURL

        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        video_title = mobj.group(1).decode('utf-8')

        video_uploader = mobj.group(2).decode('utf-8')

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]
class YahooIE(InfoExtractor): class YahooIE(InfoExtractor):
"""Information extractor for video.yahoo.com.""" """Information extractor for video.yahoo.com."""
# _VALID_URL matches all Yahoo! Video URLs _WORKING = False
# _VPAGE_URL matches only the extractable '/watch/' URLs # _VALID_URL matches all Yahoo! Video URLs
_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?' # _VPAGE_URL matches only the extractable '/watch/' URLs
_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?' _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
IE_NAME = u'video.yahoo' _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
IE_NAME = u'video.yahoo'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id): def report_download_webpage(self, video_id):
"""Report webpage download.""" """Report webpage download."""
self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id) self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id) self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
def _real_extract(self, url, new_video=True): def _real_extract(self, url, new_video=True):
# Extract ID from URL # Extract ID from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return return
video_id = mobj.group(2) video_id = mobj.group(2)
video_extension = 'flv' video_extension = 'flv'
# Rewrite valid but non-extractable URLs as # Rewrite valid but non-extractable URLs as
# extractable English language /watch/ URLs # extractable English language /watch/ URLs
if re.match(self._VPAGE_URL, url) is None: if re.match(self._VPAGE_URL, url) is None:
request = urllib2.Request(url) request = compat_urllib_request.Request(url)
try: try:
webpage = urllib2.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
return return
mobj = re.search(r'\("id", "([0-9]+)"\);', webpage) mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract id field') self._downloader.trouble(u'ERROR: Unable to extract id field')
return return
yahoo_id = mobj.group(1) yahoo_id = mobj.group(1)
mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage) mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract vid field') self._downloader.trouble(u'ERROR: Unable to extract vid field')
return return
yahoo_vid = mobj.group(1) yahoo_vid = mobj.group(1)
url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id) url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
return self._real_extract(url, new_video=False) return self._real_extract(url, new_video=False)
# Retrieve video webpage to extract further information # Retrieve video webpage to extract further information
request = urllib2.Request(url) request = compat_urllib_request.Request(url)
try: try:
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
return return
# Extract uploader and title from webpage # Extract uploader and title from webpage
self.report_extraction(video_id) self.report_extraction(video_id)
mobj = re.search(r'<meta name="title" content="(.*)" />', webpage) mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.trouble(u'ERROR: unable to extract video title')
return return
video_title = mobj.group(1).decode('utf-8') video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage) mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video uploader') self._downloader.trouble(u'ERROR: unable to extract video uploader')
return return
video_uploader = mobj.group(1).decode('utf-8') video_uploader = mobj.group(1).decode('utf-8')
# Extract video thumbnail # Extract video thumbnail
mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage) mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail') self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return return
video_thumbnail = mobj.group(1).decode('utf-8') video_thumbnail = mobj.group(1).decode('utf-8')
# Extract video description # Extract video description
mobj = re.search(r'<meta name="description" content="(.*)" />', webpage) mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video description') self._downloader.trouble(u'ERROR: unable to extract video description')
return return
video_description = mobj.group(1).decode('utf-8') video_description = mobj.group(1).decode('utf-8')
if not video_description: if not video_description:
video_description = 'No description available.' video_description = 'No description available.'
# Extract video height and width # Extract video height and width
mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage) mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video height') self._downloader.trouble(u'ERROR: unable to extract video height')
return return
yv_video_height = mobj.group(1) yv_video_height = mobj.group(1)
mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage) mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video width') self._downloader.trouble(u'ERROR: unable to extract video width')
return return
yv_video_width = mobj.group(1) yv_video_width = mobj.group(1)
# Retrieve video playlist to extract media URL # Retrieve video playlist to extract media URL
# I'm not completely sure what all these options are, but we # I'm not completely sure what all these options are, but we
# seem to need most of them, otherwise the server sends a 401. # seem to need most of them, otherwise the server sends a 401.
yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
yv_bitrate = '700' # according to Wikipedia this is hard-coded yv_bitrate = '700' # according to Wikipedia this is hard-coded
request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id + request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height + '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797') '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
try: try:
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read() webpage = compat_urllib_request.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
return return
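# Illustrative sketch (not from the commit): the same playlist request URL as
# above, built with urllib.parse.urlencode instead of string concatenation.
# Python 3 stdlib names are assumed; parameter values mirror the hard-coded
# ones in the request above, with toy ids filled in.
from urllib.parse import urlencode

video_id, yv_lg, yv_bitrate = '12345', 'R0xx6idZnW2zlrKP8xxAIR', '700'
yv_video_height, yv_video_width = '360', '640'
params = urlencode({
    'node_id': video_id, 'tech': 'flash', 'mode': 'playlist',
    'lg': yv_lg, 'bitrate': yv_bitrate,
    'vidH': yv_video_height, 'vidW': yv_video_width,
    'swf': 'as3', 'rd': 'video.yahoo.com', 'tk': 'null',
    'adsupported': 'v1,v2,', 'eventid': '1301797',
})
playlist_url = 'http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?' + params
# end sketch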
# Extract media URL from playlist XML # Extract media URL from playlist XML
mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage) mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract media URL') self._downloader.trouble(u'ERROR: Unable to extract media URL')
return return
video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8') video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
video_url = unescapeHTML(video_url) video_url = unescapeHTML(video_url)
return [{ return [{
'id': video_id.decode('utf-8'), 'id': video_id.decode('utf-8'),
'url': video_url, 'url': video_url,
'uploader': video_uploader, 'uploader': video_uploader,
'upload_date': u'NA', 'upload_date': None,
'title': video_title, 'title': video_title,
'ext': video_extension.decode('utf-8'), 'ext': video_extension.decode('utf-8'),
'thumbnail': video_thumbnail.decode('utf-8'), 'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description, 'description': video_description,
'thumbnail': video_thumbnail, }]
'player_url': None,
}]
class VimeoIE(InfoExtractor): class VimeoIE(InfoExtractor):
"""Information extractor for vimeo.com.""" """Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs # _VALID_URL matches Vimeo URLs
_VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)' _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
IE_NAME = u'vimeo' IE_NAME = u'vimeo'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id): def report_download_webpage(self, video_id):
"""Report webpage download.""" """Report webpage download."""
self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id) self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id) self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
def _real_extract(self, url, new_video=True): def _real_extract(self, url, new_video=True):
# Extract ID from URL # Extract ID from URL
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
# Retrieve video webpage to extract further information # Retrieve video webpage to extract further information
request = urllib2.Request(url, None, std_headers) request = compat_urllib_request.Request(url, None, std_headers)
try: try:
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read() webpage_bytes = compat_urllib_request.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: webpage = webpage_bytes.decode('utf-8')
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
return
# Now we begin extracting as much information as we can from what we # Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors, # retrieved. First we extract the information common to all extractors,
# and later we extract those that are Vimeo specific. # and later we extract those that are Vimeo specific.

self.report_extraction(video_id) self.report_extraction(video_id)
# Extract the config JSON # Extract the config JSON
config = webpage.split(' = {config:')[1].split(',assets:')[0] try:
try: config = webpage.split(' = {config:')[1].split(',assets:')[0]
config = json.loads(config) config = json.loads(config)
except: except:
self._downloader.trouble(u'ERROR: unable to extract info section') self._downloader.trouble(u'ERROR: unable to extract info section')
return return
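# Illustrative sketch (not from the commit): how the split-based extraction
# above recovers the embedded config JSON - everything between " = {config:"
# and ",assets:" is itself valid JSON. Toy page text; json is stdlib.
import json

page = 'var t = {config:{"video":{"title":"Demo"}},assets:{}};'
config = json.loads(page.split(' = {config:')[1].split(',assets:')[0])
print(config["video"]["title"])  # -> Demo
# end sketch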
# Extract title
video_title = config["video"]["title"]
# Extract uploader # Extract title
video_uploader = config["video"]["owner"]["name"] video_title = config["video"]["title"]
# Extract video thumbnail # Extract uploader and uploader_id
video_thumbnail = config["video"]["thumbnail"] video_uploader = config["video"]["owner"]["name"]
video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
# Extract video description # Extract video thumbnail
video_description = get_element_by_id("description", webpage.decode('utf8')) video_thumbnail = config["video"]["thumbnail"]
if video_description: video_description = clean_html(video_description)
else: video_description = ''
# Extract upload date # Extract video description
video_upload_date = u'NA' video_description = get_element_by_attribute("itemprop", "description", webpage)
mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage) if video_description: video_description = clean_html(video_description)
if mobj is not None: else: video_description = ''
video_upload_date = mobj.group(1)
# Vimeo specific: extract request signature and timestamp # Extract upload date
sig = config['request']['signature'] video_upload_date = None
timestamp = config['request']['timestamp'] mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
if mobj is not None:
video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
# Vimeo specific: extract video codec and quality information # Vimeo specific: extract request signature and timestamp
# TODO bind to format param sig = config['request']['signature']
codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')] timestamp = config['request']['timestamp']
for codec in codecs:
if codec[0] in config["video"]["files"]:
video_codec = codec[0]
video_extension = codec[1]
if 'hd' in config["video"]["files"][codec[0]]: quality = 'hd'
else: quality = 'sd'
break
else:
self._downloader.trouble(u'ERROR: no known codec found')
return
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ # Vimeo specific: extract video codec and quality information
%(video_id, sig, timestamp, quality, video_codec.upper()) # First consider quality, then codecs, then take everything
# TODO bind to format param
codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
files = { 'hd': [], 'sd': [], 'other': []}
for codec_name, codec_extension in codecs:
if codec_name in config["video"]["files"]:
if 'hd' in config["video"]["files"][codec_name]:
files['hd'].append((codec_name, codec_extension, 'hd'))
elif 'sd' in config["video"]["files"][codec_name]:
files['sd'].append((codec_name, codec_extension, 'sd'))
else:
files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
return [{ for quality in ('hd', 'sd', 'other'):
'id': video_id, if len(files[quality]) > 0:
'url': video_url, video_quality = files[quality][0][2]
'uploader': video_uploader, video_codec = files[quality][0][0]
'upload_date': video_upload_date, video_extension = files[quality][0][1]
'title': video_title, self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
'ext': video_extension, break
'thumbnail': video_thumbnail, else:
'description': video_description, self._downloader.trouble(u'ERROR: no known codec found')
'player_url': None, return
}]
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
%(video_id, sig, timestamp, video_quality, video_codec.upper())
return [{
'id': video_id,
'url': video_url,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': video_upload_date,
'title': video_title,
'ext': video_extension,
'thumbnail': video_thumbnail,
'description': video_description,
}]
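# Illustrative sketch (not from the commit): the selection order used above,
# demonstrated on a toy availability mapping. Quality wins first ('hd', then
# 'sd', then 'other'); within a bucket, earlier codecs in the preference list
# win. pick_format is a hypothetical name.
def pick_format(available):
    codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
    files = {'hd': [], 'sd': [], 'other': []}
    for codec_name, codec_extension in codecs:
        if codec_name in available:
            if 'hd' in available[codec_name]:
                files['hd'].append((codec_name, codec_extension, 'hd'))
            elif 'sd' in available[codec_name]:
                files['sd'].append((codec_name, codec_extension, 'sd'))
            else:
                files['other'].append((codec_name, codec_extension, available[codec_name][0]))
    for quality in ('hd', 'sd', 'other'):
        if files[quality]:
            return files[quality][0]
    return None

print(pick_format({'vp8': ['sd'], 'h264': ['hd', 'sd']}))  # -> ('h264', 'mp4', 'hd')
# end sketch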
class ArteTvIE(InfoExtractor):
"""arte.tv information extractor."""
_VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
_LIVE_URL = r'index-[0-9]+\.html$'
IE_NAME = u'arte.tv'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
def fetch_webpage(self, url):
request = compat_urllib_request.Request(url)
try:
self.report_download_webpage(url)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
return
except ValueError as err:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
return webpage
def grep_webpage(self, url, regex, regexFlags, matchTuples):
page = self.fetch_webpage(url)
mobj = re.search(regex, page, regexFlags)
info = {}
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
for (i, key, err) in matchTuples:
if mobj.group(i) is None:
self._downloader.trouble(err)
return
else:
info[key] = mobj.group(i)
return info
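# Illustrative sketch (not from the commit): the match-tuple pattern that
# grep_webpage uses, reduced to a standalone helper. Each tuple is
# (group index, result key, error message); grep is a hypothetical name.
import re

def grep(text, regex, match_tuples, flags=0):
    mobj = re.search(regex, text, flags)
    if mobj is None:
        raise ValueError('pattern not found')
    info = {}
    for index, key, err in match_tuples:
        if mobj.group(index) is None:
            raise ValueError(err)
        info[key] = mobj.group(index)
    return info

page = '<video id="42"><name>Demo</name></video>'
print(grep(page, r'<video id="(.*?)">.*?<name>(.*?)</name>',
           [(1, 'id', 'no id'), (2, 'title', 'no title')], re.DOTALL))
# -> {'id': '42', 'title': 'Demo'}
# end sketch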
def extractLiveStream(self, url):
video_lang = url.split('/')[-4]
info = self.grep_webpage(
url,
r'src="(.*?/videothek_js.*?\.js)',
0,
[
(1, 'url', u'ERROR: Invalid URL: %s' % url)
]
)
http_host = url.split('/')[2]
next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
info = self.grep_webpage(
next_url,
r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
'(http://.*?\.swf).*?' +
'(rtmp://.*?)\'',
re.DOTALL,
[
(1, 'path', u'ERROR: could not extract video path: %s' % url),
(2, 'player', u'ERROR: could not extract video player: %s' % url),
(3, 'url', u'ERROR: could not extract video url: %s' % url)
]
)
video_url = u'%s/%s' % (info.get('url'), info.get('path'))
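# NOTE (editor): video_url is built here but never returned or passed on, so
# live-stream extraction currently produces no result downstream.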
def extractPlus7Stream(self, url):
video_lang = url.split('/')[-3]
info = self.grep_webpage(
url,
r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
0,
[
(1, 'url', u'ERROR: Invalid URL: %s' % url)
]
)
next_url = compat_urllib_parse.unquote(info.get('url'))
info = self.grep_webpage(
next_url,
r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
0,
[
(1, 'url', u'ERROR: Could not find <video> tag: %s' % url)
]
)
next_url = compat_urllib_parse.unquote(info.get('url'))
info = self.grep_webpage(
next_url,
r'<video id="(.*?)".*?>.*?' +
'<name>(.*?)</name>.*?' +
'<dateVideo>(.*?)</dateVideo>.*?' +
'<url quality="hd">(.*?)</url>',
re.DOTALL,
[
(1, 'id', u'ERROR: could not extract video id: %s' % url),
(2, 'title', u'ERROR: could not extract video title: %s' % url),
(3, 'date', u'ERROR: could not extract video date: %s' % url),
(4, 'url', u'ERROR: could not extract video url: %s' % url)
]
)
return {
'id': info.get('id'),
'url': compat_urllib_parse.unquote(info.get('url')),
'uploader': u'arte.tv',
'upload_date': info.get('date'),
'title': info.get('title').decode('utf-8'),
'ext': u'mp4',
'format': u'NA',
'player_url': None,
}
def _real_extract(self, url):
video_id = url.split('/')[-1]
self.report_extraction(video_id)
if re.search(self._LIVE_URL, video_id) is not None:
self.extractLiveStream(url)
return
else:
info = self.extractPlus7Stream(url)
return [info]
class GenericIE(InfoExtractor): class GenericIE(InfoExtractor):
"""Generic last-resort information extractor.""" """Generic last-resort information extractor."""
_VALID_URL = r'.*' _VALID_URL = r'.*'
IE_NAME = u'generic' IE_NAME = u'generic'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id): def report_download_webpage(self, video_id):
"""Report webpage download.""" """Report webpage download."""
self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.') self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id) self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id) self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
def report_following_redirect(self, new_url): def report_following_redirect(self, new_url):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url) self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
def _test_redirect(self, url):
"""Check if it is a redirect, like url shorteners, in case restart chain."""
class HeadRequest(urllib2.Request):
def get_method(self):
return "HEAD"
class HEADRedirectHandler(urllib2.HTTPRedirectHandler): def _test_redirect(self, url):
""" """Check if it is a redirect, like url shorteners, in case restart chain."""
Subclass the HTTPRedirectHandler to make it use our class HeadRequest(compat_urllib_request.Request):
HeadRequest also on the redirected URL def get_method(self):
""" return "HEAD"
def redirect_request(self, req, fp, code, msg, headers, newurl):
if code in (301, 302, 303, 307):
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
return HeadRequest(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
class HTTPMethodFallback(urllib2.BaseHandler): class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
""" """
Fallback to GET if HEAD is not allowed (405 HTTP error) Subclass the HTTPRedirectHandler to make it use our
""" HeadRequest also on the redirected URL
def http_error_405(self, req, fp, code, msg, headers): """
fp.read() def redirect_request(self, req, fp, code, msg, headers, newurl):
fp.close() if code in (301, 302, 303, 307):
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
return HeadRequest(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
newheaders = dict((k,v) for k,v in req.headers.items() class HTTPMethodFallback(compat_urllib_request.BaseHandler):
if k.lower() not in ("content-length", "content-type")) """
return self.parent.open(urllib2.Request(req.get_full_url(), Fallback to GET if HEAD is not allowed (405 HTTP error)
headers=newheaders, """
origin_req_host=req.get_origin_req_host(), def http_error_405(self, req, fp, code, msg, headers):
unverifiable=True)) fp.read()
fp.close()
# Build our opener newheaders = dict((k,v) for k,v in req.headers.items()
opener = urllib2.OpenerDirector() if k.lower() not in ("content-length", "content-type"))
for handler in [urllib2.HTTPHandler, urllib2.HTTPDefaultErrorHandler, return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
HTTPMethodFallback, HEADRedirectHandler, headers=newheaders,
urllib2.HTTPErrorProcessor, urllib2.HTTPSHandler]: origin_req_host=req.get_origin_req_host(),
opener.add_handler(handler()) unverifiable=True))
response = opener.open(HeadRequest(url)) # Build our opener
new_url = response.geturl() opener = compat_urllib_request.OpenerDirector()
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
if url == new_url: return False HTTPMethodFallback, HEADRedirectHandler,
compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
self.report_following_redirect(new_url) opener.add_handler(handler())
self._downloader.download([new_url])
return True
def _real_extract(self, url): response = opener.open(HeadRequest(url))
if self._test_redirect(url): return new_url = response.geturl()
video_id = url.split('/')[-1] if url == new_url:
request = urllib2.Request(url) return False
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
except ValueError, err:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
self.report_extraction(video_id) self.report_following_redirect(new_url)
# Start with something easy: JW Player in SWFObject self._downloader.download([new_url])
mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage) return True
if mobj is None:
# Broaden the search a little bit
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# It's possible that one of the regexes def _real_extract(self, url):
# matched, but returned an empty group: if self._test_redirect(url): return
if mobj.group(1) is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
video_url = urllib.unquote(mobj.group(1)) video_id = url.split('/')[-1]
video_id = os.path.basename(video_url) request = compat_urllib_request.Request(url)
try:
self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
return
except ValueError as err:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# here's a fun little line of code for you: self.report_extraction(video_id)
video_extension = os.path.splitext(video_id)[1][1:] # Start with something easy: JW Player in SWFObject
video_id = os.path.splitext(video_id)[0] mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
if mobj is None:
# Broaden the search a little bit
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# it's tempting to parse this further, but you would # It's possible that one of the regexes
# have to take into account all the variations like # matched, but returned an empty group:
# Video Title - Site Name if mobj.group(1) is None:
# Site Name | Video Title self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
# Video Title - Tagline | Site Name return
# and so on and so forth; it's just not practical
mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
# video uploader is domain name video_url = compat_urllib_parse.unquote(mobj.group(1))
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url) video_id = os.path.basename(video_url)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_uploader = mobj.group(1).decode('utf-8')
return [{ # here's a fun little line of code for you:
'id': video_id.decode('utf-8'), video_extension = os.path.splitext(video_id)[1][1:]
'url': video_url.decode('utf-8'), video_id = os.path.splitext(video_id)[0]
'uploader': video_uploader,
'upload_date': u'NA', # it's tempting to parse this further, but you would
'title': video_title, # have to take into account all the variations like
'ext': video_extension.decode('utf-8'), # Video Title - Site Name
'format': u'NA', # Site Name | Video Title
'player_url': None, # Video Title - Tagline | Site Name
}] # and so on and so forth; it's just not practical
mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1)
# video uploader is domain name
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_uploader = mobj.group(1)
return [{
'id': video_id,
'url': video_url,
'uploader': video_uploader,
'upload_date': None,
'title': video_title,
'ext': video_extension,
}]
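# Illustrative sketch (not from the commit): the redirect probe in
# _test_redirect above, written against the Python 3 stdlib. urllib follows
# 3xx redirects by default, so one HEAD request is enough to learn a URL
# shortener's final target; the fallback to GET on a 405 response that the
# handlers above implement is omitted here. resolve_redirect is hypothetical.
from urllib.request import Request, urlopen

def resolve_redirect(url):
    response = urlopen(Request(url, method='HEAD'))
    final_url = response.geturl()
    return None if final_url == url else final_url
# end sketch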
class YoutubeSearchIE(InfoExtractor): class YoutubeSearchIE(InfoExtractor):
"""Information Extractor for YouTube search queries.""" """Information Extractor for YouTube search queries."""
_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+' _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc' _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
_max_youtube_results = 1000 _max_youtube_results = 1000
IE_NAME = u'youtube:search' IE_NAME = u'youtube:search'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_page(self, query, pagenum): def report_download_page(self, query, pagenum):
"""Report attempt to download search page with given number.""" """Report attempt to download search page with given number."""
query = query.decode(preferredencoding()) query = query.decode(preferredencoding())
self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum)) self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
def _real_extract(self, query): def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query) mobj = re.match(self._VALID_URL, query)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
return return
prefix, query = query.split(':') prefix, query = query.split(':')
prefix = prefix[8:] prefix = prefix[8:]
query = query.encode('utf-8') query = query.encode('utf-8')
if prefix == '': if prefix == '':
self._download_n_results(query, 1) self._download_n_results(query, 1)
return return
elif prefix == 'all': elif prefix == 'all':
self._download_n_results(query, self._max_youtube_results) self._download_n_results(query, self._max_youtube_results)
return return
else: else:
try: try:
n = long(prefix) n = int(prefix)
if n <= 0: if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return return
elif n > self._max_youtube_results: elif n > self._max_youtube_results:
self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n)) self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results n = self._max_youtube_results
self._download_n_results(query, n) self._download_n_results(query, n)
return return
except ValueError: # parsing prefix as integer fails except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1) self._download_n_results(query, 1)
return return
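# Illustrative sketch (not from the commit): how the "ytsearchN:" prefix above
# maps to a download count - empty -> 1, "all" -> the cap, a number -> capped
# at the maximum (the real code rejects n <= 0 with an error rather than
# clamping it). parse_search_count is a hypothetical helper.
def parse_search_count(query, prefix_len=len('ytsearch'), maximum=1000):
    prefix, _, terms = query.partition(':')
    prefix = prefix[prefix_len:]
    if prefix == '':
        return 1, terms
    if prefix == 'all':
        return maximum, terms
    try:
        return min(int(prefix), maximum), terms
    except ValueError:  # parsing prefix as integer fails
        return 1, terms

print(parse_search_count('ytsearch5:cute cats'))    # -> (5, 'cute cats')
print(parse_search_count('ytsearchall:cute cats'))  # -> (1000, 'cute cats')
# end sketch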
def _download_n_results(self, query, n): def _download_n_results(self, query, n):
"""Downloads a specified number of results for a query""" """Downloads a specified number of results for a query"""
video_ids = [] video_ids = []
pagenum = 0 pagenum = 0
limit = n limit = n
while (50 * pagenum) < limit: while (50 * pagenum) < limit:
self.report_download_page(query, pagenum+1) self.report_download_page(query, pagenum+1)
result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1) result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
request = urllib2.Request(result_url) request = compat_urllib_request.Request(result_url)
try: try:
data = urllib2.urlopen(request).read() data = compat_urllib_request.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
return return
api_response = json.loads(data)['data'] api_response = json.loads(data)['data']
new_ids = list(video['id'] for video in api_response['items']) new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids video_ids += new_ids
limit = min(n, api_response['totalItems']) limit = min(n, api_response['totalItems'])
pagenum += 1 pagenum += 1
if len(video_ids) > n: if len(video_ids) > n:
video_ids = video_ids[:n] video_ids = video_ids[:n]
for id in video_ids: for id in video_ids:
self._downloader.download(['http://www.youtube.com/watch?v=%s' % id]) self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
return return
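# Illustrative sketch (not from the commit): the paging loop in
# _download_n_results above, in generic form. A hypothetical fetch_page(n)
# returns (ids, total_items); paging stops once 50 * pages covers
# min(n, total_items), exactly like the GData loop.
def collect_ids(fetch_page, n, page_size=50):
    video_ids, pagenum, limit = [], 0, n
    while page_size * pagenum < limit:
        ids, total = fetch_page(pagenum)
        video_ids += ids
        limit = min(n, total)
        pagenum += 1
    return video_ids[:n]

pages = [(['a'] * 50, 120), (['b'] * 50, 120), (['c'] * 20, 120)]
print(len(collect_ids(lambda p: pages[p], 120)))  # -> 120
# end sketch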
class GoogleSearchIE(InfoExtractor): class GoogleSearchIE(InfoExtractor):
"""Information Extractor for Google Video search queries.""" """Information Extractor for Google Video search queries."""
_VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+' _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en' _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
_VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)' _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
_MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"' _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
_max_google_results = 1000 _max_google_results = 1000
IE_NAME = u'video.google:search' IE_NAME = u'video.google:search'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_page(self, query, pagenum): def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number.""" """Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding()) query = query.decode(preferredencoding())
self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum)) self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
def _real_extract(self, query): def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query) mobj = re.match(self._VALID_URL, query)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
return return
prefix, query = query.split(':') prefix, query = query.split(':')
prefix = prefix[8:] prefix = prefix[8:]
query = query.encode('utf-8') query = query.encode('utf-8')
if prefix == '': if prefix == '':
self._download_n_results(query, 1) self._download_n_results(query, 1)
return return
elif prefix == 'all': elif prefix == 'all':
self._download_n_results(query, self._max_google_results) self._download_n_results(query, self._max_google_results)
return return
else: else:
try: try:
n = long(prefix) n = int(prefix)
if n <= 0: if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return return
elif n > self._max_google_results: elif n > self._max_google_results:
self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n)) self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
n = self._max_google_results n = self._max_google_results
self._download_n_results(query, n) self._download_n_results(query, n)
return return
except ValueError: # parsing prefix as integer fails except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1) self._download_n_results(query, 1)
return return
def _download_n_results(self, query, n): def _download_n_results(self, query, n):
"""Downloads a specified number of results for a query""" """Downloads a specified number of results for a query"""
video_ids = [] video_ids = []
pagenum = 0 pagenum = 0
while True: while True:
self.report_download_page(query, pagenum) self.report_download_page(query, pagenum)
result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum*10) result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10)
request = urllib2.Request(result_url) request = compat_urllib_request.Request(result_url)
try: try:
page = urllib2.urlopen(request).read() page = compat_urllib_request.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
for mobj in re.finditer(self._VIDEO_INDICATOR, page): for mobj in re.finditer(self._VIDEO_INDICATOR, page):
video_id = mobj.group(1) video_id = mobj.group(1)
if video_id not in video_ids: if video_id not in video_ids:
video_ids.append(video_id) video_ids.append(video_id)
if len(video_ids) == n: if len(video_ids) == n:
# Specified n videos reached # Specified n videos reached
for id in video_ids: for id in video_ids:
self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id]) self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
return return
if re.search(self._MORE_PAGES_INDICATOR, page) is None: if re.search(self._MORE_PAGES_INDICATOR, page) is None:
for id in video_ids: for id in video_ids:
self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id]) self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
return return
pagenum = pagenum + 1 pagenum = pagenum + 1
class YahooSearchIE(InfoExtractor): class YahooSearchIE(InfoExtractor):
"""Information Extractor for Yahoo! Video search queries.""" """Information Extractor for Yahoo! Video search queries."""
_VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
_MORE_PAGES_INDICATOR = r'\s*Next'
_max_yahoo_results = 1000
IE_NAME = u'video.yahoo:search'
def __init__(self, downloader=None): _WORKING = False
InfoExtractor.__init__(self, downloader) _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
_MORE_PAGES_INDICATOR = r'\s*Next'
_max_yahoo_results = 1000
IE_NAME = u'video.yahoo:search'
def report_download_page(self, query, pagenum): def __init__(self, downloader=None):
"""Report attempt to download playlist page with given number.""" InfoExtractor.__init__(self, downloader)
query = query.decode(preferredencoding())
self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
def _real_extract(self, query): def report_download_page(self, query, pagenum):
mobj = re.match(self._VALID_URL, query) """Report attempt to download playlist page with given number."""
if mobj is None: query = query.decode(preferredencoding())
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
return
prefix, query = query.split(':') def _real_extract(self, query):
prefix = prefix[8:] mobj = re.match(self._VALID_URL, query)
query = query.encode('utf-8') if mobj is None:
if prefix == '': self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
self._download_n_results(query, 1) return
return
elif prefix == 'all':
self._download_n_results(query, self._max_yahoo_results)
return
else:
try:
n = long(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
self._download_n_results(query, n)
return
except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1)
return
def _download_n_results(self, query, n): prefix, query = query.split(':')
"""Downloads a specified number of results for a query""" prefix = prefix[8:]
query = query.encode('utf-8')
if prefix == '':
self._download_n_results(query, 1)
return
elif prefix == 'all':
self._download_n_results(query, self._max_yahoo_results)
return
else:
try:
n = int(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
self._download_n_results(query, n)
return
except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1)
return
video_ids = [] def _download_n_results(self, query, n):
already_seen = set() """Downloads a specified number of results for a query"""
pagenum = 1
while True: video_ids = []
self.report_download_page(query, pagenum) already_seen = set()
result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum) pagenum = 1
request = urllib2.Request(result_url)
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return
# Extract video identifiers while True:
for mobj in re.finditer(self._VIDEO_INDICATOR, page): self.report_download_page(query, pagenum)
video_id = mobj.group(1) result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
if video_id not in already_seen: request = compat_urllib_request.Request(result_url)
video_ids.append(video_id) try:
already_seen.add(video_id) page = compat_urllib_request.urlopen(request).read()
if len(video_ids) == n: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
# Specified n videos reached self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
for id in video_ids: return
self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
return
if re.search(self._MORE_PAGES_INDICATOR, page) is None: # Extract video identifiers
for id in video_ids: for mobj in re.finditer(self._VIDEO_INDICATOR, page):
self._downloader.download(['http://video.yahoo.com/watch/%s' % id]) video_id = mobj.group(1)
return if video_id not in already_seen:
video_ids.append(video_id)
already_seen.add(video_id)
if len(video_ids) == n:
# Specified n videos reached
for id in video_ids:
self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
return
pagenum = pagenum + 1 if re.search(self._MORE_PAGES_INDICATOR, page) is None:
for id in video_ids:
self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
return
pagenum = pagenum + 1
class YoutubePlaylistIE(InfoExtractor): class YoutubePlaylistIE(InfoExtractor):
"""Information Extractor for YouTube playlists.""" """Information Extractor for YouTube playlists."""
_VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*' _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en' _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
_VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s' _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s'
_MORE_PAGES_INDICATOR = r'yt-uix-pager-next' _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
IE_NAME = u'youtube:playlist' IE_NAME = u'youtube:playlist'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_page(self, playlist_id, pagenum): def report_download_page(self, playlist_id, pagenum):
"""Report attempt to download playlist page with given number.""" """Report attempt to download playlist page with given number."""
self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum)) self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
def _real_extract(self, url): def _real_extract(self, url):
# Extract playlist id # Extract playlist id
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return return
# Single video case # Single video case
if mobj.group(3) is not None: if mobj.group(3) is not None:
self._downloader.download([mobj.group(3)]) self._downloader.download([mobj.group(3)])
return return
# Download playlist pages # Download playlist pages
# the prefix defaults to 'p' for playlists, but other types need extra care # the prefix defaults to 'p' for playlists, but other types need extra care
playlist_prefix = mobj.group(1) playlist_prefix = mobj.group(1)
if playlist_prefix == 'a': if playlist_prefix == 'a':
playlist_access = 'artist' playlist_access = 'artist'
else: else:
playlist_prefix = 'p' playlist_prefix = 'p'
playlist_access = 'view_play_list' playlist_access = 'view_play_list'
playlist_id = mobj.group(2) playlist_id = mobj.group(2)
video_ids = [] video_ids = []
pagenum = 1 pagenum = 1
while True: while True:
self.report_download_page(playlist_id, pagenum) self.report_download_page(playlist_id, pagenum)
url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum) url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
request = urllib2.Request(url) request = compat_urllib_request.Request(url)
try: try:
page = urllib2.urlopen(request).read() page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
ids_in_page = [] ids_in_page = []
for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page): for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
if mobj.group(1) not in ids_in_page: if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1)) ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page) video_ids.extend(ids_in_page)
if re.search(self._MORE_PAGES_INDICATOR, page) is None: if self._MORE_PAGES_INDICATOR not in page:
break break
pagenum = pagenum + 1 pagenum = pagenum + 1
playliststart = self._downloader.params.get('playliststart', 1) - 1 total = len(video_ids)
playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1:
video_ids = video_ids[playliststart:]
else:
video_ids = video_ids[playliststart:playlistend]
for id in video_ids: playliststart = self._downloader.params.get('playliststart', 1) - 1
self._downloader.download(['http://www.youtube.com/watch?v=%s' % id]) playlistend = self._downloader.params.get('playlistend', -1)
return if playlistend == -1:
video_ids = video_ids[playliststart:]
else:
video_ids = video_ids[playliststart:playlistend]
if len(video_ids) == total:
self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
else:
self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
for id in video_ids:
self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
return
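# Illustrative sketch (not from the commit): the playliststart/playlistend
# slicing above. playliststart is 1-based and inclusive; playlistend of -1
# means "to the end". select_range is a hypothetical name.
def select_range(video_ids, playliststart=1, playlistend=-1):
    start = playliststart - 1
    return video_ids[start:] if playlistend == -1 else video_ids[start:playlistend]

print(select_range(list('abcdef'), 2, 4))   # -> ['b', 'c', 'd']
print(select_range(list('abcdef'), 3, -1))  # -> ['c', 'd', 'e', 'f']
# end sketch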
class YoutubeChannelIE(InfoExtractor): class YoutubeChannelIE(InfoExtractor):
"""Information Extractor for YouTube channels.""" """Information Extractor for YouTube channels."""
_VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$" _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
_TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en' _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
_MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
IE_NAME = u'youtube:channel' IE_NAME = u'youtube:channel'
def report_download_page(self, channel_id, pagenum): def report_download_page(self, channel_id, pagenum):
"""Report attempt to download channel page with given number.""" """Report attempt to download channel page with given number."""
self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum)) self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))
def _real_extract(self, url): def _real_extract(self, url):
# Extract channel id # Extract channel id
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return return
# Download channel pages # Download channel pages
channel_id = mobj.group(1) channel_id = mobj.group(1)
video_ids = [] video_ids = []
pagenum = 1 pagenum = 1
while True: while True:
self.report_download_page(channel_id, pagenum) self.report_download_page(channel_id, pagenum)
url = self._TEMPLATE_URL % (channel_id, pagenum) url = self._TEMPLATE_URL % (channel_id, pagenum)
request = urllib2.Request(url) request = compat_urllib_request.Request(url)
try: try:
page = urllib2.urlopen(request).read() page = compat_urllib_request.urlopen(request).read().decode('utf8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
ids_in_page = [] ids_in_page = []
for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page): for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page):
if mobj.group(1) not in ids_in_page: if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1)) ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page) video_ids.extend(ids_in_page)
if re.search(self._MORE_PAGES_INDICATOR, page) is None: if self._MORE_PAGES_INDICATOR not in page:
break break
pagenum = pagenum + 1 pagenum = pagenum + 1
for id in video_ids: self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
return for id in video_ids:
self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
return
class YoutubeUserIE(InfoExtractor): class YoutubeUserIE(InfoExtractor):
"""Information Extractor for YouTube users.""" """Information Extractor for YouTube users."""
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)' _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s' _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50 _GDATA_PAGE_SIZE = 50
_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d' _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
_VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]' _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
IE_NAME = u'youtube:user' IE_NAME = u'youtube:user'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_page(self, username, start_index): def report_download_page(self, username, start_index):
"""Report attempt to download user page.""" """Report attempt to download user page."""
self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' % self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
(username, start_index, start_index + self._GDATA_PAGE_SIZE)) (username, start_index, start_index + self._GDATA_PAGE_SIZE))
def _real_extract(self, url): def _real_extract(self, url):
# Extract username # Extract username
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return return
username = mobj.group(1) username = mobj.group(1)
# Download video ids using YouTube Data API. Result size per # Download video ids using YouTube Data API. Result size per
# query is limited (currently to 50 videos) so we need to query # query is limited (currently to 50 videos) so we need to query
# page by page until there are no video ids - which means we got # page by page until there are no video ids - which means we got
# all of them. # all of them.
video_ids = [] video_ids = []
pagenum = 0 pagenum = 0
while True: while True:
start_index = pagenum * self._GDATA_PAGE_SIZE + 1 start_index = pagenum * self._GDATA_PAGE_SIZE + 1
self.report_download_page(username, start_index) self.report_download_page(username, start_index)
request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)) request = compat_urllib_request.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
try: try:
page = urllib2.urlopen(request).read() page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return return
# Extract video identifiers # Extract video identifiers
ids_in_page = [] ids_in_page = []
for mobj in re.finditer(self._VIDEO_INDICATOR, page): for mobj in re.finditer(self._VIDEO_INDICATOR, page):
if mobj.group(1) not in ids_in_page: if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1)) ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page) video_ids.extend(ids_in_page)
# A little optimization - if current page is not # A little optimization - if current page is not
# "full", ie. does not contain PAGE_SIZE video ids then # "full", ie. does not contain PAGE_SIZE video ids then
# we can assume that this page is the last one - there # we can assume that this page is the last one - there
# are no more ids on further pages - no need to query # are no more ids on further pages - no need to query
# again. # again.
if len(ids_in_page) < self._GDATA_PAGE_SIZE: if len(ids_in_page) < self._GDATA_PAGE_SIZE:
break break
pagenum += 1 pagenum += 1
all_ids_count = len(video_ids) all_ids_count = len(video_ids)
playliststart = self._downloader.params.get('playliststart', 1) - 1 playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1) playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1: if playlistend == -1:
video_ids = video_ids[playliststart:] video_ids = video_ids[playliststart:]
else: else:
video_ids = video_ids[playliststart:playlistend] video_ids = video_ids[playliststart:playlistend]
self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" % self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
(username, all_ids_count, len(video_ids))) (username, all_ids_count, len(video_ids)))
for video_id in video_ids: for video_id in video_ids:
self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id]) self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
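# Illustrative sketch (not from the commit): the "short page means last page"
# optimization used above. A hypothetical fetch(start_index) returns up to
# page_size ids; any batch smaller than page_size ends the loop without one
# extra empty request.
def collect_all(fetch, page_size=50):
    video_ids, pagenum = [], 0
    while True:
        batch = fetch(pagenum * page_size + 1)
        video_ids.extend(batch)
        if len(batch) < page_size:
            break
        pagenum += 1
    return video_ids

pages = {1: ['a'] * 50, 51: ['b'] * 30}
print(len(collect_all(lambda start: pages.get(start, []), 50)))  # -> 80
# end sketch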
class BlipTVUserIE(InfoExtractor): class BlipTVUserIE(InfoExtractor):
"""Information Extractor for blip.tv users.""" """Information Extractor for blip.tv users."""
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$' _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
_PAGE_SIZE = 12 _PAGE_SIZE = 12
IE_NAME = u'blip.tv:user' IE_NAME = u'blip.tv:user'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_page(self, username, pagenum): def report_download_page(self, username, pagenum):
"""Report attempt to download user page.""" """Report attempt to download user page."""
self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' % self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
(self.IE_NAME, username, pagenum)) (self.IE_NAME, username, pagenum))
def _real_extract(self, url): def _real_extract(self, url):
# Extract username # Extract username
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url) self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return return
username = mobj.group(1) username = mobj.group(1)
page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1' page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
request = urllib2.Request(url) request = compat_urllib_request.Request(url)
try: try:
page = urllib2.urlopen(request).read().decode('utf-8') page = compat_urllib_request.urlopen(request).read().decode('utf-8')
mobj = re.search(r'data-users-id="([^"]+)"', page) mobj = re.search(r'data-users-id="([^"]+)"', page)
page_base = page_base % mobj.group(1) page_base = page_base % mobj.group(1)
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return return
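# Illustrative sketch (not from the commit): pulling the numeric users_id out
# of the profile page and filling the Ajax endpoint template, as above. Toy
# page text; re is stdlib.
import re

page = '<div id="num_episodes" data-users-id="12345"></div>'
page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
mobj = re.search(r'data-users-id="([^"]+)"', page)
print(page_base % mobj.group(1))
# -> http://m.blip.tv/pr/show_get_full_episode_list?users_id=12345&lite=0&esi=1
# end sketch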
# Download video ids using BlipTV Ajax calls. Result size per # Download video ids using BlipTV Ajax calls. Result size per
# query is limited (currently to 12 videos) so we need to query # query is limited (currently to 12 videos) so we need to query
# page by page until there are no video ids - which means we got # page by page until there are no video ids - which means we got
# all of them. # all of them.
video_ids = [] video_ids = []
pagenum = 1 pagenum = 1
while True: while True:
self.report_download_page(username, pagenum) self.report_download_page(username, pagenum)
request = urllib2.Request( page_base + "&page=" + str(pagenum) ) request = compat_urllib_request.Request( page_base + "&page=" + str(pagenum) )
try: try:
page = urllib2.urlopen(request).read().decode('utf-8') page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return return
# Extract video identifiers # Extract video identifiers
ids_in_page = [] ids_in_page = []
for mobj in re.finditer(r'href="/([^"]+)"', page): for mobj in re.finditer(r'href="/([^"]+)"', page):
if mobj.group(1) not in ids_in_page: if mobj.group(1) not in ids_in_page:
ids_in_page.append(unescapeHTML(mobj.group(1))) ids_in_page.append(unescapeHTML(mobj.group(1)))
video_ids.extend(ids_in_page) video_ids.extend(ids_in_page)
# A little optimization - if current page is not # A little optimization - if current page is not
# "full", ie. does not contain PAGE_SIZE video ids then # "full", ie. does not contain PAGE_SIZE video ids then
# we can assume that this page is the last one - there # we can assume that this page is the last one - there
# are no more ids on further pages - no need to query # are no more ids on further pages - no need to query
# again. # again.
if len(ids_in_page) < self._PAGE_SIZE: if len(ids_in_page) < self._PAGE_SIZE:
break break
pagenum += 1 pagenum += 1
all_ids_count = len(video_ids) all_ids_count = len(video_ids)
playliststart = self._downloader.params.get('playliststart', 1) - 1 playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1) playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1: if playlistend == -1:
video_ids = video_ids[playliststart:] video_ids = video_ids[playliststart:]
else: else:
video_ids = video_ids[playliststart:playlistend] video_ids = video_ids[playliststart:playlistend]
self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" % self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
(self.IE_NAME, username, all_ids_count, len(video_ids))) (self.IE_NAME, username, all_ids_count, len(video_ids)))
for video_id in video_ids: for video_id in video_ids:
self._downloader.download([u'http://blip.tv/'+video_id]) self._downloader.download([u'http://blip.tv/'+video_id])
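# Illustrative sketch (not part of the diff): the paging protocol used above,
# in self-contained Python 3. Keep requesting &page=N until a page returns
# fewer than _PAGE_SIZE (12) ids, which marks the last page and avoids one
# extra empty request. urlopen/re are stdlib stand-ins for the compat_*
# wrappers; page_base is the show_get_full_episode_list URL built above.
import re
from urllib.request import urlopen

PAGE_SIZE = 12

def collect_video_ids(page_base):
    video_ids = []
    pagenum = 1
    while True:
        page = urlopen(page_base + "&page=" + str(pagenum)).read().decode('utf-8')
        ids_in_page = []
        for mobj in re.finditer(r'href="/([^"]+)"', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        video_ids.extend(ids_in_page)
        if len(ids_in_page) < PAGE_SIZE:  # short page => nothing further to fetch
            break
        pagenum += 1
    return video_ids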
class DepositFilesIE(InfoExtractor): class DepositFilesIE(InfoExtractor):
"""Information extractor for depositfiles.com""" """Information extractor for depositfiles.com"""
_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)' _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
IE_NAME = u'DepositFiles'
def __init__(self, downloader=None): def report_download_webpage(self, file_id):
InfoExtractor.__init__(self, downloader) """Report webpage download."""
self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
def report_download_webpage(self, file_id): def report_extraction(self, file_id):
"""Report webpage download.""" """Report information extraction."""
self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id) self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
def report_extraction(self, file_id): def _real_extract(self, url):
"""Report information extraction.""" file_id = url.split('/')[-1]
self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id) # Rebuild url in english locale
url = 'http://depositfiles.com/en/files/' + file_id
def _real_extract(self, url): # Retrieve file webpage with 'Free download' button pressed
file_id = url.split('/')[-1] free_download_indication = { 'gateway_result' : '1' }
# Rebuild url in english locale request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
url = 'http://depositfiles.com/en/files/' + file_id try:
self.report_download_webpage(file_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
return
# Retrieve file webpage with 'Free download' button pressed # Search for the real file URL
free_download_indication = { 'gateway_result' : '1' } mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
request = urllib2.Request(url, urllib.urlencode(free_download_indication)) if (mobj is None) or (mobj.group(1) is None):
try: # Try to figure out reason of the error.
self.report_download_webpage(file_id) mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
webpage = urllib2.urlopen(request).read() if (mobj is not None) and (mobj.group(1) is not None):
except (urllib2.URLError, httplib.HTTPException, socket.error), err: restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: %s' % restriction_message)
return else:
self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
return
# Search for the real file URL file_url = mobj.group(1)
mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage) file_extension = os.path.splitext(file_url)[1][1:]
if (mobj is None) or (mobj.group(1) is None):
# Try to figure out reason of the error.
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
if (mobj is not None) and (mobj.group(1) is not None):
restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
self._downloader.trouble(u'ERROR: %s' % restriction_message)
else:
self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
return
file_url = mobj.group(1) # Search for file title
file_extension = os.path.splitext(file_url)[1][1:] mobj = re.search(r'<b title="(.*?)">', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
file_title = mobj.group(1).decode('utf-8')
# Search for file title return [{
mobj = re.search(r'<b title="(.*?)">', webpage) 'id': file_id.decode('utf-8'),
if mobj is None: 'url': file_url.decode('utf-8'),
self._downloader.trouble(u'ERROR: unable to extract title') 'uploader': None,
return 'upload_date': None,
file_title = mobj.group(1).decode('utf-8') 'title': file_title,
'ext': file_extension.decode('utf-8'),
return [{ }]
'id': file_id.decode('utf-8'),
'url': file_url.decode('utf-8'),
'uploader': u'NA',
'upload_date': u'NA',
'title': file_title,
'ext': file_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
}]
class FacebookIE(InfoExtractor): class FacebookIE(InfoExtractor):
"""Information Extractor for Facebook""" """Information Extractor for Facebook"""
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)' _WORKING = False
_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&' _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
_NETRC_MACHINE = 'facebook' _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
_available_formats = ['video', 'highqual', 'lowqual'] _NETRC_MACHINE = 'facebook'
_video_extensions = { _available_formats = ['video', 'highqual', 'lowqual']
'video': 'mp4', _video_extensions = {
'highqual': 'mp4', 'video': 'mp4',
'lowqual': 'mp4', 'highqual': 'mp4',
} 'lowqual': 'mp4',
IE_NAME = u'facebook' }
IE_NAME = u'facebook'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def _reporter(self, message): def _reporter(self, message):
"""Add header and report message.""" """Add header and report message."""
self._downloader.to_screen(u'[facebook] %s' % message) self._downloader.to_screen(u'[facebook] %s' % message)
def report_login(self): def report_login(self):
"""Report attempt to log in.""" """Report attempt to log in."""
self._reporter(u'Logging in') self._reporter(u'Logging in')
def report_video_webpage_download(self, video_id): def report_video_webpage_download(self, video_id):
"""Report attempt to download video webpage.""" """Report attempt to download video webpage."""
self._reporter(u'%s: Downloading video webpage' % video_id) self._reporter(u'%s: Downloading video webpage' % video_id)
def report_information_extraction(self, video_id): def report_information_extraction(self, video_id):
"""Report attempt to extract video information.""" """Report attempt to extract video information."""
self._reporter(u'%s: Extracting video information' % video_id) self._reporter(u'%s: Extracting video information' % video_id)
def _parse_page(self, video_webpage): def _parse_page(self, video_webpage):
"""Extract video information from page""" """Extract video information from page"""
# General data # General data
data = {'title': r'\("video_title", "(.*?)"\)', data = {'title': r'\("video_title", "(.*?)"\)',
'description': r'<div class="datawrap">(.*?)</div>', 'description': r'<div class="datawrap">(.*?)</div>',
'owner': r'\("video_owner_name", "(.*?)"\)', 'owner': r'\("video_owner_name", "(.*?)"\)',
'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)', 'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
} }
video_info = {} video_info = {}
for piece in data.keys(): for piece in data.keys():
mobj = re.search(data[piece], video_webpage) mobj = re.search(data[piece], video_webpage)
if mobj is not None: if mobj is not None:
video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape")) video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
# Video urls # Video urls
video_urls = {} video_urls = {}
for fmt in self._available_formats: for fmt in self._available_formats:
mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage) mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
if mobj is not None: if mobj is not None:
# URL is in a Javascript segment inside an escaped Unicode format within # URL is in a Javascript segment inside an escaped Unicode format within
# the generally utf-8 page # the generally utf-8 page
video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape")) video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
video_info['video_urls'] = video_urls video_info['video_urls'] = video_urls
return video_info return video_info
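# Illustrative sketch (not part of the diff): why _parse_page decodes twice.
# The URL sits inside a JavaScript string literal, so it is backslash-escaped
# ("\u0025" etc.) and, underneath that, percent-encoded. The sample string
# below is made up; the two-step decode mirrors the code above.
from urllib.parse import unquote_plus

escaped = 'http\\u00253A\\u00252F\\u00252Fvideo.example\\u00252Fv.mp4'
js_decoded = escaped.encode('ascii').decode('unicode_escape')  # -> 'http%3A%2F%2F...'
real_url = unquote_plus(js_decoded)                            # -> 'http://video.example/v.mp4'
assert real_url == 'http://video.example/v.mp4'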
def _real_initialize(self): def _real_initialize(self):
if self._downloader is None: if self._downloader is None:
return return
useremail = None useremail = None
password = None password = None
downloader_params = self._downloader.params downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data # Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None: if downloader_params.get('username', None) is not None:
useremail = downloader_params['username'] useremail = downloader_params['username']
password = downloader_params['password'] password = downloader_params['password']
elif downloader_params.get('usenetrc', False): elif downloader_params.get('usenetrc', False):
try: try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE) info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None: if info is not None:
useremail = info[0] useremail = info[0]
password = info[2] password = info[2]
else: else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError), err: except (IOError, netrc.NetrcParseError) as err:
self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err)) self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
return return
if useremail is None: if useremail is None:
return return
# Log in # Log in
login_form = { login_form = {
'email': useremail, 'email': useremail,
'pass': password, 'pass': password,
'login': 'Log+In' 'login': 'Log+In'
} }
request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form)) request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
try: try:
self.report_login() self.report_login()
login_results = urllib2.urlopen(request).read() login_results = compat_urllib_request.urlopen(request).read()
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.') self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return return
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err)) self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
return return
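# Illustrative sketch (not part of the diff): the credential lookup above in
# isolation. netrc.netrc().authenticators(machine) returns a (login, account,
# password) tuple for a matching "machine facebook" entry in ~/.netrc, so
# index 0 is the email and index 2 the password. Plain stdlib stand-in.
import netrc

def netrc_credentials(machine='facebook'):
    info = netrc.netrc().authenticators(machine)
    if info is None:
        raise netrc.NetrcParseError('No authenticators for %s' % machine)
    return info[0], info[2]   # (login, password)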
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return return
video_id = mobj.group('ID') video_id = mobj.group('ID')
# Get video webpage # Get video webpage
self.report_video_webpage_download(video_id) self.report_video_webpage_download(video_id)
request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id) request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
try: try:
page = urllib2.urlopen(request) page = compat_urllib_request.urlopen(request)
video_webpage = page.read() video_webpage = page.read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
return return
# Start extracting information # Start extracting information
self.report_information_extraction(video_id) self.report_information_extraction(video_id)
# Extract information # Extract information
video_info = self._parse_page(video_webpage) video_info = self._parse_page(video_webpage)
# uploader # uploader
if 'owner' not in video_info: if 'owner' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname') self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return return
video_uploader = video_info['owner'] video_uploader = video_info['owner']
# title # title
if 'title' not in video_info: if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.trouble(u'ERROR: unable to extract video title')
return return
video_title = video_info['title'] video_title = video_info['title']
video_title = video_title.decode('utf-8') video_title = video_title.decode('utf-8')
# thumbnail image # thumbnail image
if 'thumbnail' not in video_info: if 'thumbnail' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video thumbnail') self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
video_thumbnail = '' video_thumbnail = ''
else: else:
video_thumbnail = video_info['thumbnail'] video_thumbnail = video_info['thumbnail']
# upload date # upload date
upload_date = u'NA' upload_date = None
if 'upload_date' in video_info: if 'upload_date' in video_info:
upload_time = video_info['upload_date'] upload_time = video_info['upload_date']
timetuple = email.utils.parsedate_tz(upload_time) timetuple = email.utils.parsedate_tz(upload_time)
if timetuple is not None: if timetuple is not None:
try: try:
upload_date = time.strftime('%Y%m%d', timetuple[0:9]) upload_date = time.strftime('%Y%m%d', timetuple[0:9])
except: except:
pass pass
# description # description
video_description = video_info.get('description', 'No description available.') video_description = video_info.get('description', 'No description available.')
url_map = video_info['video_urls'] url_map = video_info['video_urls']
if len(url_map.keys()) > 0: if url_map:
# Decide which formats to download # Decide which formats to download
req_format = self._downloader.params.get('format', None) req_format = self._downloader.params.get('format', None)
format_limit = self._downloader.params.get('format_limit', None) format_limit = self._downloader.params.get('format_limit', None)
if format_limit is not None and format_limit in self._available_formats: if format_limit is not None and format_limit in self._available_formats:
format_list = self._available_formats[self._available_formats.index(format_limit):] format_list = self._available_formats[self._available_formats.index(format_limit):]
else: else:
format_list = self._available_formats format_list = self._available_formats
existing_formats = [x for x in format_list if x in url_map] existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0: if len(existing_formats) == 0:
self._downloader.trouble(u'ERROR: no known formats available for video') self._downloader.trouble(u'ERROR: no known formats available for video')
return return
if req_format is None: if req_format is None:
video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
elif req_format == 'worst': elif req_format == 'worst':
video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
elif req_format == '-1': elif req_format == '-1':
video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
else: else:
# Specific format # Specific format
if req_format not in url_map: if req_format not in url_map:
self._downloader.trouble(u'ERROR: requested format not available') self._downloader.trouble(u'ERROR: requested format not available')
return return
video_url_list = [(req_format, url_map[req_format])] # Specific format video_url_list = [(req_format, url_map[req_format])] # Specific format
results = [] results = []
for format_param, video_real_url in video_url_list: for format_param, video_real_url in video_url_list:
# Extension # Extension
video_extension = self._video_extensions.get(format_param, 'mp4') video_extension = self._video_extensions.get(format_param, 'mp4')
results.append({ results.append({
'id': video_id.decode('utf-8'), 'id': video_id.decode('utf-8'),
'url': video_real_url.decode('utf-8'), 'url': video_real_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'), 'uploader': video_uploader.decode('utf-8'),
'upload_date': upload_date, 'upload_date': upload_date,
'title': video_title, 'title': video_title,
'ext': video_extension.decode('utf-8'), 'ext': video_extension.decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')), 'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': video_thumbnail.decode('utf-8'), 'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description.decode('utf-8'), 'description': video_description.decode('utf-8'),
'player_url': None, })
}) return results
return results
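# Illustrative sketch (not part of the diff): the format-selection policy
# above, isolated. Formats are listed best-first, so "best" is the first
# entry that actually has a URL and "worst" the last; '-1' means all; any
# other value must match a listed format exactly. The format names are this
# extractor's; the function itself is just a stand-in.
def pick_formats(url_map, req_format=None, available=('video', 'highqual', 'lowqual')):
    existing = [f for f in available if f in url_map]
    if not existing:
        raise ValueError('no known formats available for video')
    if req_format is None:
        return [(existing[0], url_map[existing[0]])]        # best quality
    if req_format == 'worst':
        return [(existing[-1], url_map[existing[-1]])]      # worst quality
    if req_format == '-1':
        return [(f, url_map[f]) for f in existing]          # all formats
    if req_format not in url_map:
        raise ValueError('requested format not available')
    return [(req_format, url_map[req_format])]

# e.g. pick_formats({'highqual': 'http://a/hq.mp4'}) -> [('highqual', 'http://a/hq.mp4')]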
class BlipTVIE(InfoExtractor): class BlipTVIE(InfoExtractor):
"""Information extractor for blip.tv""" """Information extractor for blip.tv"""
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$' _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
_URL_EXT = r'^.*\.([a-z0-9]+)$' _URL_EXT = r'^.*\.([a-z0-9]+)$'
IE_NAME = u'blip.tv' IE_NAME = u'blip.tv'
def report_extraction(self, file_id): def report_extraction(self, file_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
def report_direct_download(self, title): def report_direct_download(self, title):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title)) self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return return
if '?' in url: if '?' in url:
cchar = '&' cchar = '&'
else: else:
cchar = '?' cchar = '?'
json_url = url + cchar + 'skin=json&version=2&no_wrap=1' json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
request = urllib2.Request(json_url.encode('utf-8')) request = compat_urllib_request.Request(json_url)
self.report_extraction(mobj.group(1)) self.report_extraction(mobj.group(1))
info = None info = None
try: try:
urlh = urllib2.urlopen(request) urlh = compat_urllib_request.urlopen(request)
if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
basename = url.split('/')[-1] basename = url.split('/')[-1]
title,ext = os.path.splitext(basename) title,ext = os.path.splitext(basename)
title = title.decode('UTF-8') title = title.decode('UTF-8')
ext = ext.replace('.', '') ext = ext.replace('.', '')
self.report_direct_download(title) self.report_direct_download(title)
info = { info = {
'id': title, 'id': title,
'url': url, 'url': url,
'title': title, 'uploader': None,
'ext': ext, 'upload_date': None,
'urlhandle': urlh 'title': title,
} 'ext': ext,
except (urllib2.URLError, httplib.HTTPException, socket.error), err: 'urlhandle': urlh
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err)) }
return except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if info is None: # Regular URL self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
try: return
json_code = urlh.read() if info is None: # Regular URL
except (urllib2.URLError, httplib.HTTPException, socket.error), err: try:
self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err)) json_code_bytes = urlh.read()
return json_code = json_code_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
return
try: try:
json_data = json.loads(json_code) json_data = json.loads(json_code)
if 'Post' in json_data: if 'Post' in json_data:
data = json_data['Post'] data = json_data['Post']
else: else:
data = json_data data = json_data
upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
video_url = data['media']['url'] video_url = data['media']['url']
umobj = re.match(self._URL_EXT, video_url) umobj = re.match(self._URL_EXT, video_url)
if umobj is None: if umobj is None:
raise ValueError('Cannot determine filename extension') raise ValueError('Cannot determine filename extension')
ext = umobj.group(1) ext = umobj.group(1)
info = { info = {
'id': data['item_id'], 'id': data['item_id'],
'url': video_url, 'url': video_url,
'uploader': data['display_name'], 'uploader': data['display_name'],
'upload_date': upload_date, 'upload_date': upload_date,
'title': data['title'], 'title': data['title'],
'ext': ext, 'ext': ext,
'format': data['media']['mimeType'], 'format': data['media']['mimeType'],
'thumbnail': data['thumbnailUrl'], 'thumbnail': data['thumbnailUrl'],
'description': data['description'], 'description': data['description'],
'player_url': data['embedUrl'] 'player_url': data['embedUrl']
} }
except (ValueError,KeyError), err: except (ValueError,KeyError) as err:
self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
return return
std_headers['User-Agent'] = 'iTunes/10.6.1' std_headers['User-Agent'] = 'iTunes/10.6.1'
return [info] return [info]
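# Illustrative sketch (not part of the diff): how the extractor above asks
# blip.tv for JSON metadata. It appends skin=json&version=2&no_wrap=1 using
# '&' or '?' depending on whether the URL already carries a query string,
# then tells "direct download" apart from "metadata" by the response's
# Content-Type. Python 3 stdlib stand-in; parameters come from the code above.
from urllib.request import urlopen

def blip_json_url(url):
    cchar = '&' if '?' in url else '?'
    return url + cchar + 'skin=json&version=2&no_wrap=1'

def is_direct_download(url):
    urlh = urlopen(blip_json_url(url))
    return urlh.headers.get('Content-Type', '').startswith('video/')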
class MyVideoIE(InfoExtractor): class MyVideoIE(InfoExtractor):
"""Information Extractor for myvideo.de.""" """Information Extractor for myvideo.de."""
_VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*' _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
IE_NAME = u'myvideo' IE_NAME = u'myvideo'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id) self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
def _real_extract(self,url): def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return return
video_id = mobj.group(1) video_id = mobj.group(1)
# Get video webpage # Get video webpage
request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id) webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
try: webpage = self._download_webpage(webpage_url, video_id)
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
self.report_extraction(video_id) self.report_extraction(video_id)
mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />', mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
webpage) webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL') self._downloader.trouble(u'ERROR: unable to extract media URL')
return return
video_url = mobj.group(1) + ('/%s.flv' % video_id) video_url = mobj.group(1) + ('/%s.flv' % video_id)
mobj = re.search('<title>([^<]+)</title>', webpage) mobj = re.search('<title>([^<]+)</title>', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title') self._downloader.trouble(u'ERROR: unable to extract title')
return return
video_title = mobj.group(1) video_title = mobj.group(1)
return [{ return [{
'id': video_id, 'id': video_id,
'url': video_url, 'url': video_url,
'uploader': u'NA', 'uploader': None,
'upload_date': u'NA', 'upload_date': None,
'title': video_title, 'title': video_title,
'ext': u'flv', 'ext': u'flv',
'format': u'NA', }]
'player_url': None,
}]
class ComedyCentralIE(InfoExtractor): class ComedyCentralIE(InfoExtractor):
"""Information extractor for The Daily Show and Colbert Report """ """Information extractor for The Daily Show and Colbert Report """
_VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$' # urls can be abbreviations like :thedailyshow or :colbert
IE_NAME = u'comedycentral' # urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
|(https?://)?(www\.)?
(?P<showname>thedailyshow|colbertnation)\.com/
(full-episodes/(?P<episode>.*)|
(?P<clip>
(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
$"""
IE_NAME = u'comedycentral'
def report_extraction(self, episode_id): _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
def report_config_download(self, episode_id): _video_extensions = {
self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id) '3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': '1280x720',
'2200': '960x540',
'1700': '768x432',
'1200': '640x360',
'750': '512x288',
'400': '384x216',
}
def report_index_download(self, episode_id): def suitable(self, url):
self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id) """Receives a URL and returns True if suitable for this IE."""
return re.match(self._VALID_URL, url, re.VERBOSE) is not None
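# Illustrative sketch (not part of the diff): suitable() has to pass
# re.VERBOSE because the new _VALID_URL is written in verbose syntax, where
# whitespace and line breaks are ignored. A quick check of the pattern
# (copied from _VALID_URL above) against the URL shapes the comments list:
import re

pattern = re.compile(r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                          |(https?://)?(www\.)?
                          (?P<showname>thedailyshow|colbertnation)\.com/
                          (full-episodes/(?P<episode>.*)|
                           (?P<clip>
                            (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                            |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
                          $""", re.VERBOSE)

assert pattern.match(':tds')
assert pattern.match('http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day')
assert pattern.match('http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news')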
def report_player_url(self, episode_id): def report_extraction(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id) self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
def _real_extract(self, url): def report_config_download(self, episode_id):
mobj = re.match(self._VALID_URL, url) self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
if mobj.group('shortname'): def report_index_download(self, episode_id):
if mobj.group('shortname') in ('tds', 'thedailyshow'): self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
url = u'http://www.thedailyshow.com/full-episodes/'
else:
url = u'http://www.colbertnation.com/full-episodes/'
mobj = re.match(self._VALID_URL, url)
assert mobj is not None
dlNewest = not mobj.group('episode') def report_player_url(self, episode_id):
if dlNewest: self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
req = urllib2.Request(url)
self.report_extraction(epTitle)
try:
htmlHandle = urllib2.urlopen(req)
html = htmlHandle.read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
return
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
return
if mobj.group('episode') == '':
self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
return
epTitle = mobj.group('episode')
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"', html) def _print_formats(self, formats):
if len(mMovieParams) == 0: print('Available formats:')
self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url) for x in formats:
return print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
playerUrl_raw = mMovieParams[0][0]
self.report_player_url(epTitle)
try:
urlHandle = urllib2.urlopen(playerUrl_raw)
playerUrl = urlHandle.geturl()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to find out player URL: ' + unicode(err))
return
uri = mMovieParams[0][1] def _real_extract(self, url):
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + urllib.urlencode({'uri': uri}) mobj = re.match(self._VALID_URL, url, re.VERBOSE)
self.report_index_download(epTitle) if mobj is None:
try: self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
indexXml = urllib2.urlopen(indexUrl).read() return
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
return
results = [] if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = u'http://www.thedailyshow.com/full-episodes/'
else:
url = u'http://www.colbertnation.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
idoc = xml.etree.ElementTree.fromstring(indexXml) if mobj.group('clip'):
itemEls = idoc.findall('.//item') if mobj.group('showname') == 'thedailyshow':
for itemEl in itemEls: epTitle = mobj.group('tdstitle')
mediaId = itemEl.findall('./guid')[0].text else:
shortMediaId = mediaId.split(':')[-1] epTitle = mobj.group('cntitle')
showId = mediaId.split(':')[-2].replace('.com', '') dlNewest = False
officialTitle = itemEl.findall('./title')[0].text else:
officialDate = itemEl.findall('./pubDate')[0].text dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' + req = compat_urllib_request.Request(url)
urllib.urlencode({'uri': mediaId})) self.report_extraction(epTitle)
configReq = urllib2.Request(configUrl) try:
self.report_config_download(epTitle) htmlHandle = compat_urllib_request.urlopen(req)
try: html = htmlHandle.read()
configXml = urllib2.urlopen(configReq).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
except (urllib2.URLError, httplib.HTTPException, socket.error), err: self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err)) return
return if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
return
if mobj.group('episode') == '':
self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
return
epTitle = mobj.group('episode')
cdoc = xml.etree.ElementTree.fromstring(configXml) mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html)
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
if len(turls) == 0: if len(mMovieParams) == 0:
self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found') # The Colbert Report embeds the information in a <param> without
continue # a URL prefix; so extract the alternate reference
# and then add the URL prefix manually.
# For now, just pick the highest bitrate altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html)
format,video_url = turls[-1] if len(altMovieParams) == 0:
self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
return
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
effTitle = showId + u'-' + epTitle playerUrl_raw = mMovieParams[0][0]
info = { self.report_player_url(epTitle)
'id': shortMediaId, try:
'url': video_url, urlHandle = compat_urllib_request.urlopen(playerUrl_raw)
'uploader': showId, playerUrl = urlHandle.geturl()
'upload_date': officialDate, except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
'title': effTitle, self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
'ext': 'mp4', return
'format': format,
'thumbnail': None,
'description': officialTitle,
'player_url': playerUrl
}
results.append(info) uri = mMovieParams[0][1]
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
return results self.report_index_download(epTitle)
try:
indexXml = compat_urllib_request.urlopen(indexUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
return
results = []
idoc = xml.etree.ElementTree.fromstring(indexXml)
itemEls = idoc.findall('.//item')
for itemEl in itemEls:
mediaId = itemEl.findall('./guid')[0].text
shortMediaId = mediaId.split(':')[-1]
showId = mediaId.split(':')[-2].replace('.com', '')
officialTitle = itemEl.findall('./title')[0].text
officialDate = itemEl.findall('./pubDate')[0].text
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
compat_urllib_parse.urlencode({'uri': mediaId}))
configReq = compat_urllib_request.Request(configUrl)
self.report_config_download(epTitle)
try:
configXml = compat_urllib_request.urlopen(configReq).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
return
cdoc = xml.etree.ElementTree.fromstring(configXml)
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
if len(turls) == 0:
self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
continue
if self._downloader.params.get('listformats', None):
self._print_formats([i[0] for i in turls])
return
# For now, just pick the highest bitrate
format,video_url = turls[-1]
# Get the format arg from the arg stream
req_format = self._downloader.params.get('format', None)
# Select format if we can find one
for f,v in turls:
if f == req_format:
format, video_url = f, v
break
# Patch to download from alternative CDN, which does not
# break on current RTMPDump builds
broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"
if video_url.startswith(broken_cdn):
video_url = video_url.replace(broken_cdn, better_cdn)
effTitle = showId + u'-' + epTitle
info = {
'id': shortMediaId,
'url': video_url,
'uploader': showId,
'upload_date': officialDate,
'title': effTitle,
'ext': 'mp4',
'format': format,
'thumbnail': None,
'description': officialTitle,
'player_url': None #playerUrl
}
results.append(info)
return results
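# Illustrative sketch (not part of the diff): the rendition choice above in
# isolation. turls holds (bitrate, rtmp_url) pairs as listed in the config
# XML (lowest to highest); the default is the last/highest one, an exact
# --format match overrides it, and the known-broken CDN prefix is swapped
# for one that current RTMPDump builds can play. CDN values are the ones
# hard-coded above.
BROKEN_CDN = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
BETTER_CDN = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"

def pick_rendition(turls, req_format=None):
    fmt, video_url = turls[-1]                 # default: highest bitrate
    for f, v in turls:
        if f == req_format:                    # exact match on requested bitrate
            fmt, video_url = f, v
            break
    if video_url.startswith(BROKEN_CDN):
        video_url = BETTER_CDN + video_url[len(BROKEN_CDN):]
    return fmt, video_url

# e.g. pick_rendition([('400', 'rtmpe://a/low'), ('3500', 'rtmpe://a/high')]) -> ('3500', 'rtmpe://a/high')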
class EscapistIE(InfoExtractor): class EscapistIE(InfoExtractor):
"""Information extractor for The Escapist """ """Information extractor for The Escapist """
_VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$' _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
IE_NAME = u'escapist' IE_NAME = u'escapist'
def report_extraction(self, showName): def report_extraction(self, showName):
self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName) self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
def report_config_download(self, showName): def report_config_download(self, showName):
self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName) self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return return
showName = mobj.group('showname') showName = mobj.group('showname')
videoId = mobj.group('episode') videoId = mobj.group('episode')
self.report_extraction(showName) self.report_extraction(showName)
try: try:
webPage = urllib2.urlopen(url) webPage = compat_urllib_request.urlopen(url)
webPageBytes = webPage.read() webPageBytes = webPage.read()
m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type']) m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
webPage = webPageBytes.decode(m.group(1) if m else 'utf-8') webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err)) self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
return return
descMatch = re.search('<meta name="description" content="([^"]*)"', webPage) descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
description = unescapeHTML(descMatch.group(1)) description = unescapeHTML(descMatch.group(1))
imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage) imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
imgUrl = unescapeHTML(imgMatch.group(1)) imgUrl = unescapeHTML(imgMatch.group(1))
playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage) playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
playerUrl = unescapeHTML(playerUrlMatch.group(1)) playerUrl = unescapeHTML(playerUrlMatch.group(1))
configUrlMatch = re.search('config=(.*)$', playerUrl) configUrlMatch = re.search('config=(.*)$', playerUrl)
configUrl = urllib2.unquote(configUrlMatch.group(1)) configUrl = compat_urllib_parse.unquote(configUrlMatch.group(1))
self.report_config_download(showName) self.report_config_download(showName)
try: try:
configJSON = urllib2.urlopen(configUrl).read() configJSON = compat_urllib_request.urlopen(configUrl)
except (urllib2.URLError, httplib.HTTPException, socket.error), err: m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
self._downloader.trouble(u'ERROR: unable to download configuration: ' + unicode(err)) configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
return except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
return
# Technically, it's JavaScript, not JSON # Technically, it's JavaScript, not JSON
configJSON = configJSON.replace("'", '"') configJSON = configJSON.replace("'", '"')
try: try:
config = json.loads(configJSON) config = json.loads(configJSON)
except (ValueError,), err: except (ValueError,) as err:
self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + unicode(err)) self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
return return
playlist = config['playlist'] playlist = config['playlist']
videoUrl = playlist[1]['url'] videoUrl = playlist[1]['url']
info = { info = {
'id': videoId, 'id': videoId,
'url': videoUrl, 'url': videoUrl,
'uploader': showName, 'uploader': showName,
'upload_date': None, 'upload_date': None,
'title': showName, 'title': showName,
'ext': 'flv', 'ext': 'flv',
'format': 'flv', 'thumbnail': imgUrl,
'thumbnail': imgUrl, 'description': description,
'description': description, 'player_url': playerUrl,
'player_url': playerUrl, }
}
return [info] return [info]
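# Illustrative sketch (not part of the diff): the config blob fetched above
# is JavaScript, not JSON, so the code turns single quotes into double
# quotes before json.loads. The substitution is global, which only works
# while no value contains an apostrophe. A made-up example of the shape
# being parsed:
import json

config_js = "{'playlist': [{'url': 'http://cdn.example/intro.flv'}, {'url': 'http://cdn.example/episode.flv'}]}"
config = json.loads(config_js.replace("'", '"'))
video_url = config['playlist'][1]['url']   # the code above always takes entry 1
assert video_url == 'http://cdn.example/episode.flv'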
class CollegeHumorIE(InfoExtractor): class CollegeHumorIE(InfoExtractor):
"""Information extractor for collegehumor.com""" """Information extractor for collegehumor.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$' _WORKING = False
IE_NAME = u'collegehumor' _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
IE_NAME = u'collegehumor'
def report_webpage(self, video_id): def report_manifest(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) self._downloader.to_screen(u'[%s] %s: Downloading XML manifest' % (self.IE_NAME, video_id))
def report_extraction(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return return
video_id = mobj.group('videoid') video_id = mobj.group('videoid')
self.report_webpage(video_id) info = {
request = urllib2.Request(url) 'id': video_id,
try: 'uploader': None,
webpage = urllib2.urlopen(request).read() 'upload_date': None,
except (urllib2.URLError, httplib.HTTPException, socket.error), err: }
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage) self.report_extraction(video_id)
if m is None: xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
self._downloader.trouble(u'ERROR: Cannot extract internal video ID') try:
return metaXml = compat_urllib_request.urlopen(xmlUrl).read()
internal_video_id = m.group('internalvideoid') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
return
info = { mdoc = xml.etree.ElementTree.fromstring(metaXml)
'id': video_id, try:
'internal_id': internal_video_id, videoNode = mdoc.findall('./video')[0]
} info['description'] = videoNode.findall('./description')[0].text
info['title'] = videoNode.findall('./caption')[0].text
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
manifest_url = videoNode.findall('./file')[0].text
except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
return
self.report_extraction(video_id) manifest_url += '?hdcore=2.10.3'
xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id self.report_manifest(video_id)
try: try:
metaXml = urllib2.urlopen(xmlUrl).read() manifestXml = compat_urllib_request.urlopen(manifest_url).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
return return
mdoc = xml.etree.ElementTree.fromstring(metaXml) adoc = xml.etree.ElementTree.fromstring(manifestXml)
try: try:
videoNode = mdoc.findall('./video')[0] media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0]
info['description'] = videoNode.findall('./description')[0].text node_id = media_node.attrib['url']
info['title'] = videoNode.findall('./caption')[0].text video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
info['url'] = videoNode.findall('./file')[0].text except IndexError as err:
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text self._downloader.trouble(u'\nERROR: Invalid manifest file')
info['ext'] = info['url'].rpartition('.')[2] return
info['format'] = info['ext']
except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
return
return [info] url_pr = compat_urllib_parse_urlparse(manifest_url)
url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'
info['url'] = url
info['ext'] = 'f4f'
return [info]
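# Illustrative sketch (not part of the diff): the fragment-URL surgery above.
# video_id is the manifest's <id> text and node_id the <media url="...">
# attribute; the sample values below are invented purely to show the slicing.
from urllib.parse import urlparse

def first_fragment_url(manifest_url, video_id, node_id):
    pr = urlparse(manifest_url)
    # scheme://host + '/z' + id minus its last two chars + '/' + media url + first fragment
    return pr.scheme + '://' + pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'

print(first_fragment_url('http://cdn.example/vids/manifest.f4m?hdcore=2.10.3',
                         'video_1234ab', 'stream0-'))
# -> http://cdn.example/zvideo_1234/stream0-Seg1-Frag1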
class XVideosIE(InfoExtractor): class XVideosIE(InfoExtractor):
"""Information extractor for xvideos.com""" """Information extractor for xvideos.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)' _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
IE_NAME = u'xvideos' IE_NAME = u'xvideos'
def report_webpage(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def report_extraction(self, video_id): def _real_extract(self, url):
"""Report information extraction.""" mobj = re.match(self._VALID_URL, url)
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(1)
def _real_extract(self, url): webpage = self._download_webpage(url, video_id)
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(1).decode('utf-8')
self.report_webpage(video_id) self.report_extraction(video_id)
request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
self.report_extraction(video_id)
# Extract video URL # Extract video URL
mobj = re.search(r'flv_url=(.+?)&', webpage) mobj = re.search(r'flv_url=(.+?)&', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video url') self._downloader.trouble(u'ERROR: unable to extract video url')
return return
video_url = urllib2.unquote(mobj.group(1).decode('utf-8')) video_url = compat_urllib_parse.unquote(mobj.group(1))
# Extract title # Extract title
mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage) mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title') self._downloader.trouble(u'ERROR: unable to extract video title')
return return
video_title = mobj.group(1).decode('utf-8') video_title = mobj.group(1)
# Extract video thumbnail # Extract video thumbnail
mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage) mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail') self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return return
video_thumbnail = mobj.group(0).decode('utf-8') video_thumbnail = mobj.group(0)
info = { info = {
'id': video_id, 'id': video_id,
'url': video_url, 'url': video_url,
'uploader': None, 'uploader': None,
'upload_date': None, 'upload_date': None,
'title': video_title, 'title': video_title,
'ext': 'flv', 'ext': 'flv',
'format': 'flv', 'thumbnail': video_thumbnail,
'thumbnail': video_thumbnail, 'description': None,
'description': None, }
'player_url': None,
}
return [info] return [info]
class SoundcloudIE(InfoExtractor): class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com """Information extractor for soundcloud.com
To access the media, the uid of the song and a stream token To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then a request to media.soundcloud.com/crossdomain.xml. Then
the media can be grabbed by requesting from a URL composed the media can be grabbed by requesting from a URL composed
of the stream token and uid of the stream token and uid
""" """
_VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)' _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
IE_NAME = u'soundcloud' IE_NAME = u'soundcloud'
def __init__(self, downloader=None): def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader) InfoExtractor.__init__(self, downloader)
def report_webpage(self, video_id): def report_resolve(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
def report_extraction(self, video_id): def report_extraction(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
if mobj is None: if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return return
# extract uploader (which is in the url) # extract uploader (which is in the url)
uploader = mobj.group(1).decode('utf-8') uploader = mobj.group(1)
# extract simple title (uploader + slug of song title) # extract simple title (uploader + slug of song title)
slug_title = mobj.group(2).decode('utf-8') slug_title = mobj.group(2)
simple_title = uploader + u'-' + slug_title simple_title = uploader + u'-' + slug_title
self.report_webpage('%s/%s' % (uploader, slug_title)) self.report_resolve('%s/%s' % (uploader, slug_title))
request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title)) url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
try: resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
webpage = urllib2.urlopen(request).read() request = compat_urllib_request.Request(resolv_url)
except (urllib2.URLError, httplib.HTTPException, socket.error), err: try:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) info_json_bytes = compat_urllib_request.urlopen(request).read()
return info_json = info_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
return
self.report_extraction('%s/%s' % (uploader, slug_title)) info = json.loads(info_json)
video_id = info['id']
self.report_extraction('%s/%s' % (uploader, slug_title))
# extract uid and stream token that soundcloud hands out for access streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage) request = compat_urllib_request.Request(streams_url)
if mobj: try:
video_id = mobj.group(1) stream_json_bytes = compat_urllib_request.urlopen(request).read()
stream_token = mobj.group(2) stream_json = stream_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
return
# extract unsimplified title streams = json.loads(stream_json)
mobj = re.search('"title":"(.*?)",', webpage) mediaURL = streams['http_mp3_128_url']
if mobj:
title = mobj.group(1).decode('utf-8')
else:
title = simple_title
# construct media url (with uid/token) return [{
mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s" 'id': info['id'],
mediaURL = mediaURL % (video_id, stream_token) 'url': mediaURL,
'uploader': info['user']['username'],
# description 'upload_date': info['created_at'],
description = u'No description available' 'title': info['title'],
mobj = re.search('track-description-value"><p>(.*?)</p>', webpage) 'ext': u'mp3',
if mobj: 'description': info['description'],
description = mobj.group(1) }]
# upload date
upload_date = None
mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
if mobj:
try:
upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
except Exception, e:
self._downloader.to_stderr(str(e))
# for soundcloud, a request to a cross domain is required for cookies
request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
return [{
'id': video_id.decode('utf-8'),
'url': mediaURL,
'uploader': uploader.decode('utf-8'),
'upload_date': upload_date,
'title': title,
'ext': u'mp3',
'format': u'NA',
'player_url': None,
'description': description.decode('utf-8')
}]
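# Note on the flow above (comment added for clarity; endpoints are the ones
# hard-coded in this extractor): the track permalink is first resolved to a
# numeric track id via /resolve.json, and that id is then used to fetch the
# stream manifest, whose 'http_mp3_128_url' entry is the direct MP3 URL.
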
class InfoQIE(InfoExtractor):
    """Information extractor for infoq.com"""
    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        webpage = self._download_webpage(url, video_id=url)
        self.report_extraction(url)

        # Extract video URL
        mobj = re.search(r"jsclassref='([^']*)'", webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video url')
            return
        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id

        # Extract title
        mobj = re.search(r'contentTitle = "(.*?)";', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = mobj.group(1)

        # Extract description
        video_description = u'No description available.'
        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
        if mobj is not None:
            video_description = mobj.group(1)

        video_filename = video_url.split('/')[-1]
        video_id, extension = video_filename.split('.')

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': extension,  # Extension is always(?) mp4, but seems to be flv
            'thumbnail': None,
            'description': video_description,
        }

        return [info]

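# For reference: jsclassref carries a base64-encoded, URL-quoted stream path.
# An illustrative (hypothetical) value 'c2FtcGxlLm1wNA==' decodes to
# 'sample.mp4', which is appended to the rtmpe://video.infoq.com/cfx/st/
# prefix to form the playable URL.
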
class MixcloudIE(InfoExtractor):
    """Information extractor for www.mixcloud.com"""

    _WORKING = False  # New API, but it seems good http://www.mixcloud.com/developers/documentation/
    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
    IE_NAME = u'mixcloud'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_json(self, file_id):
        """Report JSON download."""
        self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME)

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))

    def get_urls(self, jsonData, fmt, bitrate='best'):
        """Get urls from 'audio_formats' section in json"""
        file_url = None
        try:
            bitrate_list = jsonData[fmt]
            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
                bitrate = max(bitrate_list)  # select highest

            url_list = jsonData[fmt][bitrate]
        except TypeError:  # we have no bitrate info.
            url_list = jsonData[fmt]
        return url_list

    def check_urls(self, url_list):
        """Returns 1st active url from list"""
        for url in url_list:
            try:
                compat_urllib_request.urlopen(url)
                return url
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                url = None

        return None

    def _print_formats(self, formats):
        print('Available formats:')
        for fmt in formats.keys():
            for b in formats[fmt]:
                try:
                    ext = formats[fmt][b][0]
                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
                except TypeError:  # we have no bitrate info
                    ext = formats[fmt][0]
                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
                    break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        # extract uploader & filename from url
        uploader = mobj.group(1).decode('utf-8')
        file_id = uploader + "-" + mobj.group(2).decode('utf-8')

        # construct API request
        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
        # retrieve .json file with links to files
        request = compat_urllib_request.Request(file_url)
        try:
            self.report_download_json(file_url)
            jsonData = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
            return

        # parse JSON
        json_data = json.loads(jsonData)
        player_url = json_data['player_swf_url']
        formats = dict(json_data['audio_formats'])

        req_format = self._downloader.params.get('format', None)
        bitrate = None

        if self._downloader.params.get('listformats', None):
            self._print_formats(formats)
            return

        if req_format is None or req_format == 'best':
            for format_param in formats.keys():
                url_list = self.get_urls(formats, format_param)
                # check urls
                file_url = self.check_urls(url_list)
                if file_url is not None:
                    break  # got it!
        else:
            if req_format not in formats:
                self._downloader.trouble(u'ERROR: format is not available')
                return

            url_list = self.get_urls(formats, req_format)
            file_url = self.check_urls(url_list)
            format_param = req_format

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': uploader.decode('utf-8'),
            'upload_date': None,
            'title': json_data['name'],
            'ext': file_url.split('.')[-1].decode('utf-8'),
            'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
            'thumbnail': json_data['thumbnail_url'],
            'description': json_data['description'],
            'player_url': player_url.decode('utf-8'),
        }]

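# Format selection above: for 'best' (or when no format is requested), every
# key in 'audio_formats' is probed via check_urls(), which opens each candidate
# URL and returns the first one that answers; unreachable candidates are skipped.
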
class StanfordOpenClassroomIE(InfoExtractor):
    """Information extractor for Stanford's Open ClassRoom"""

    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
    IE_NAME = u'stanfordoc'

    def report_download_webpage(self, objid):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid))

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        if mobj.group('course') and mobj.group('video'):  # A specific video
            course = mobj.group('course')
            video = mobj.group('video')
            info = {
                'id': course + '_' + video,
                'uploader': None,
                'upload_date': None,
            }

            self.report_extraction(info['id'])
            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
            xmlUrl = baseUrl + video + '.xml'
            try:
                metaXml = compat_urllib_request.urlopen(xmlUrl).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
                return
            mdoc = xml.etree.ElementTree.fromstring(metaXml)
            try:
                info['title'] = mdoc.findall('./title')[0].text
                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
            except IndexError:
                self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
                return
            info['ext'] = info['url'].rpartition('.')[2]
            return [info]
        elif mobj.group('course'):  # A course page
            course = mobj.group('course')
            info = {
                'id': course,
                'type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            self.report_download_webpage(info['id'])
            try:
                coursepage = compat_urllib_request.urlopen(url).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
                return

            m = re.search('<h1>([^<]+)</h1>', coursepage)
            if m:
                info['title'] = unescapeHTML(m.group(1))
            else:
                info['title'] = info['id']

            m = re.search('<description>([^<]+)</description>', coursepage)
            if m:
                info['description'] = unescapeHTML(m.group(1))

            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
            info['list'] = [
                {
                    'type': 'reference',
                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
                }
                for vpage in links]
            results = []
            for entry in info['list']:
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results

        else:  # Root page
            info = {
                'id': 'Stanford OpenClassroom',
                'type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            self.report_download_webpage(info['id'])
            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
            try:
                rootpage = compat_urllib_request.urlopen(rootURL).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
                return

            info['title'] = info['id']

            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
            info['list'] = [
                {
                    'type': 'reference',
                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
                }
                for cpage in links]

            results = []
            for entry in info['list']:
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results

class MTVIE(InfoExtractor):
    """Information extractor for MTV.com"""

    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
    IE_NAME = u'mtv'

    def report_extraction(self, video_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        if not mobj.group('proto'):
            url = 'http://' + url
        video_id = mobj.group('videoid')

        webpage = self._download_webpage(url, video_id)

        mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract song name')
            return
        song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
        mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract performer')
            return
        performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
        video_title = performer + ' - ' + song_name

        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract mtvn_uri')
            return
        mtvn_uri = mobj.group(1)

        mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract content id')
            return
        content_id = mobj.group(1)

        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
        self.report_extraction(video_id)
        request = compat_urllib_request.Request(videogen_url)
        try:
            metadataXml = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
            return

        mdoc = xml.etree.ElementTree.fromstring(metadataXml)
        renditions = mdoc.findall('.//rendition')

        # For now, always pick the highest quality.
        rendition = renditions[-1]

        try:
            _, _, ext = rendition.attrib['type'].partition('/')
            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
            video_url = rendition.find('./src').text
        except KeyError:
            self._downloader.trouble('Invalid rendition field.')
            return

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': performer,
            'upload_date': None,
            'title': video_title,
            'ext': ext,
            'format': format,
        }

        return [info]

class YoukuIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'

    def report_download_webpage(self, file_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id))

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))

    def _gen_sid(self):
        nowTime = int(time.time() * 1000)
        random1 = random.randint(1000, 1998)
        random2 = random.randint(1000, 9999)

        return "%d%d%d" % (nowTime, random1, random2)

    def _get_file_ID_mix_string(self, seed):
        mixed = []
        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
        seed = float(seed)
        for i in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            index = math.floor(seed / 65536 * len(source))
            mixed.append(source[int(index)])
            source.remove(source[int(index)])
        # return ''.join(mixed)
        return mixed

    def _get_file_id(self, fileId, seed):
        mixed = self._get_file_ID_mix_string(seed)
        ids = fileId.split('*')
        realId = []
        for ch in ids:
            if ch:
                realId.append(mixed[int(ch)])
        return ''.join(realId)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group('ID')

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id

        request = compat_urllib_request.Request(info_url, None, std_headers)
        try:
            self.report_download_webpage(video_id)
            jsondata = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return

        self.report_extraction(video_id)
        try:
            jsonstr = jsondata.decode('utf-8')
            config = json.loads(jsonstr)

            video_title = config['data'][0]['title']
            seed = config['data'][0]['seed']

            format = self._downloader.params.get('format', None)
            supported_format = list(config['data'][0]['streamfileids'].keys())

            if format is None or format == 'best':
                if 'hd2' in supported_format:
                    format = 'hd2'
                else:
                    format = 'flv'
                ext = u'flv'
            elif format == 'worst':
                format = 'mp4'
                ext = u'mp4'
            else:
                format = 'flv'
                ext = u'flv'

            fileid = config['data'][0]['streamfileids'][format]
            keys = [s['k'] for s in config['data'][0]['segs'][format]]
        except (UnicodeDecodeError, ValueError, KeyError):
            self._downloader.trouble(u'ERROR: unable to extract info section')
            return

        files_info = []
        sid = self._gen_sid()
        fileid = self._get_file_id(fileid, seed)

        # column 8,9 of fileid represent the segment number
        # fileid[7:9] should be changed
        for index, key in enumerate(keys):

            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)

            info = {
                'id': '%s_part%02d' % (video_id, index),
                'url': download_url,
                'uploader': None,
                'upload_date': None,
                'title': video_title,
                'ext': ext,
            }
            files_info.append(info)

        return files_info

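# Illustrative example of the segment substitution above (fileid value
# hypothetical): for a decoded fileid 'ABCDEFGHXXREMAINDER', segment 0 requests
# 'ABCDEFGH00REMAINDER', segment 1 'ABCDEFGH01REMAINDER', and so on; the two
# characters at positions 8-9 are replaced by the zero-padded hex segment index.
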
class XNXXIE(InfoExtractor):
    """Information extractor for xnxx.com"""
    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
    IE_NAME = u'xnxx'
    VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'

    def report_webpage(self, video_id):
        """Report information extraction"""
        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))

    def report_extraction(self, video_id):
        """Report information extraction"""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return
        video_id = mobj.group(1)

        self.report_webpage(video_id)

        # Get webpage content
        try:
            webpage_bytes = compat_urllib_request.urlopen(url).read()
            webpage = webpage_bytes.decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
            return

        result = re.search(self.VIDEO_URL_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video url')
            return
        video_url = compat_urllib_parse.unquote(result.group(1))

        result = re.search(self.VIDEO_TITLE_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        video_title = result.group(1)

        result = re.search(self.VIDEO_THUMB_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
            return
        video_thumbnail = result.group(1)

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': 'flv',
            'thumbnail': video_thumbnail,
            'description': None,
        }]

class GooglePlusIE(InfoExtractor):
    """Information extractor for plus.google.com."""

    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_extract_entry(self, url):
        """Report downloading entry"""
        self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url)

    def report_date(self, upload_date):
        """Report entry date"""
        self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)

    def report_uploader(self, uploader):
        """Report entry uploader"""
        self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader)

    def report_title(self, video_title):
        """Report entry title"""
        self._downloader.to_screen(u'[plus.google] Title: %s' % video_title)

    def report_extract_vid_page(self, video_page):
        """Report information extraction."""
        self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page)

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'

        # Step 1, Retrieve post webpage to extract further information
        self.report_extract_entry(post_url)
        request = compat_urllib_request.Request(post_url)
        try:
            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
            return

        # Extract update date
        upload_date = None
        pattern = 'title="Timestamp">(.*?)</a>'
        mobj = re.search(pattern, webpage)
        if mobj:
            upload_date = mobj.group(1)
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')
        self.report_date(upload_date)

        # Extract uploader
        uploader = None
        pattern = r'rel\="author".*?>(.*?)</a>'
        mobj = re.search(pattern, webpage)
        if mobj:
            uploader = mobj.group(1)
        self.report_uploader(uploader)

        # Extract title
        # Get the first line for title
        video_title = u'NA'
        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
        mobj = re.search(pattern, webpage)
        if mobj:
            video_title = mobj.group(1)
        self.report_title(video_title)

        # Step 2, Simulate clicking the image box to launch video
        pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
        mobj = re.search(pattern, webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video page URL')

        video_page = mobj.group(1)
        request = compat_urllib_request.Request(video_page)
        try:
            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return
        self.report_extract_vid_page(video_page)

        # Extract video links of all sizes on the video page
        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            self._downloader.trouble(u'ERROR: unable to extract video links')

        # Sort in resolution
        links = sorted(mobj)

        # Choose the lowest of the sort, i.e. highest resolution
        video_url = links[-1]
        # Only get the url. The resolution part in the tuple has no use anymore
        video_url = video_url[-1]
        # Treat escaped \u0026 style hex
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': video_extension,
        }]

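# The try/except above covers both interpreters: Python 2 str carries .decode,
# so 'unicode_escape' unescapes sequences such as \u0026 directly; Python 3 str
# does not, hence the AttributeError branch round-trips through bytes first.
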
class NBAIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
    IE_NAME = u'nba'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group(1)
        if video_id.endswith('/index.html'):
            video_id = video_id[:-len('/index.html')]

        webpage = self._download_webpage(url, video_id)

        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
        def _findProp(rexp, default=None):
            m = re.search(rexp, webpage)
            if m:
                return unescapeHTML(m.group(1))
            else:
                return default

        shortened_video_id = video_id.rpartition('/')[2]
        title = _findProp(r'<meta property="og:title" content="(.*?)"', shortened_video_id).replace('NBA.com: ', '')
        info = {
            'id': shortened_video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'upload_date': _findProp(r'<b>Date:</b> (.*?)</div>'),
            'description': _findProp(r'<div class="description">(.*?)</h1>'),
        }
        return [info]

class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = u'justin.tv'

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))

    def report_download_page(self, channel, offset):
        """Report attempt to download a single page of videos."""
        self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' %
                (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT))

    # Return count of items, list of *valid* items
    def _parse_page(self, url):
        try:
            urlh = compat_urllib_request.urlopen(url)
            webpage_bytes = urlh.read()
            webpage = webpage_bytes.decode('utf-8', 'ignore')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
            return

        response = json.loads(webpage)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['created_on'][:10])
                info.append({
                    'id': clip['id'],
                    'url': video_url,
                    'title': clip['title'],
                    'uploader': clip.get('user_id', clip.get('channel_id')),
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        api = 'http://api.justin.tv'
        video_id = mobj.group(mobj.lastindex)
        paged = False
        if mobj.lastindex == 1:
            paged = True
            api += '/channel/archives/%s.json'
        else:
            api += '/clip/show/%s.json'
        api = api % (video_id,)

        self.report_extraction(video_id)

        info = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
        while True:
            if paged:
                self.report_download_page(video_id, offset)
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(page_url)
            info.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return info

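# Pagination note for JustinTVIE: channel archives (mobj.lastindex == 1) are
# fetched _JUSTIN_PAGE_LIMIT clips per request; a page holding fewer entries
# than the limit marks the end, while single-clip URLs complete in one pass.
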
path = urlparse( video_url ).path class FunnyOrDieIE(InfoExtractor):
extension = os.path.splitext( path )[1][1:] _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
format = path.split('/')[4].split('_')[:2]
size = format[0] def _real_extract(self, url):
bitrate = format[1] mobj = re.match(self._VALID_URL, url)
format = "-".join( format ) if mobj is None:
title = u'%s-%s-%s' % (video_title, size, bitrate) self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
formats.append({
'id': video_id, video_id = mobj.group('id')
'url': video_url, webpage = self._download_webpage(url, video_id)
'uploader': video_uploader,
'upload_date': upload_date, m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
'title': title, if not m:
'ext': extension, self._downloader.trouble(u'ERROR: unable to find video information')
'format': format, video_url = unescapeHTML(m.group('url'))
'thumbnail': None,
'description': None, m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
'player_url': None if not m:
}) self._downloader.trouble(u'Cannot find video title')
title = unescapeHTML(m.group('title'))
if self._downloader.params.get('listformats', None):
self._print_formats(formats) m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
return if m:
desc = unescapeHTML(m.group('desc'))
req_format = self._downloader.params.get('format', None) else:
#format_limit = self._downloader.params.get('format_limit', None) desc = None
self._downloader.to_screen(u'[youporn] Format: %s' % req_format)
info = {
'id': video_id,
if req_format is None or req_format == 'best': 'url': video_url,
return [formats[0]] 'ext': 'mp4',
elif req_format == 'worst': 'title': title,
return [formats[-1]] 'description': desc,
elif req_format in ('-1', 'all'): }
return formats return [info]
else:
format = self._specific( req_format, formats ) class TweetReelIE(InfoExtractor):
if result is None: _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
self._downloader.trouble(u'ERROR: requested format not available')
return def _real_extract(self, url):
return [format] mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
class PornotubeIE(InfoExtractor): video_id = mobj.group('id')
"""Information extractor for pornotube.com.""" webpage = self._download_webpage(url, video_id)
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$' m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
IE_NAME = u'pornotube' if not m:
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",' self._downloader.trouble(u'ERROR: Cannot find status ID')
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by' status_id = m.group(1)
m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
def __init__(self, downloader=None): if not m:
InfoExtractor.__init__(self, downloader) self._downloader.trouble(u'WARNING: Cannot find description')
desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
def report_extract_entry(self, url):
"""Report downloading extry""" m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8')) if not m:
self._downloader.trouble(u'ERROR: Cannot find uploader')
def report_date(self, upload_date): uploader = unescapeHTML(m.group('uploader'))
"""Report finding uploaded date""" uploader_id = unescapeHTML(m.group('uploader_id'))
self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
m = re.search(r'<span unixtime="([0-9]+)"', webpage)
def report_webpage(self, url): if not m:
"""Report downloading page""" self._downloader.trouble(u'ERROR: Cannot find upload date')
self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url) upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
def report_title(self, video_title): title = desc
"""Report downloading extry""" video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
info = {
def _real_extract(self, url): 'id': video_id,
mobj = re.match(self._VALID_URL, url) 'url': video_url,
if mobj is None: 'ext': 'mov',
self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 'title': title,
return 'description': desc,
'uploader': uploader,
video_id = mobj.group('videoid').decode('utf-8') 'uploader_id': uploader_id,
video_title = mobj.group('title').decode('utf-8') 'internal_id': status_id,
self.report_title(video_title); 'upload_date': upload_date
}
# Get webpage content return [info]
try:
webpage = urllib2.urlopen(url).read() class SteamIE(InfoExtractor):
    _VALID_URL = r"""http://store.steampowered.com/
                (?P<urltype>video|app)/ #If the page is only for videos or for a game
                (?P<gameID>\d+)/?
                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
                """

    def suitable(self, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(self._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
        gameID = m.group('gameID')
        videourl = 'http://store.steampowered.com/video/%s/' % gameID
        webpage = self._download_webpage(videourl, gameID)
        mweb = re.finditer(urlRE, webpage)
        namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
        titles = re.finditer(namesRE, webpage)
        videos = []
        for vid, vtitle in zip(mweb, titles):
            video_id = vid.group('videoID')
            title = vtitle.group('videoName')
            video_url = vid.group('videoURL')
            if not video_url:
                self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
            info = {
                'id': video_id,
                'url': video_url,
                'ext': 'flv',
                'title': unescapeHTML(title)
            }
            videos.append(info)
        return videos

class UstreamIE(InfoExtractor):
    _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
    IE_NAME = u'ustream'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')
        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
        webpage = self._download_webpage(url, video_id)
        m = re.search(r'data-title="(?P<title>.+)"', webpage)
        title = m.group('title')
        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"', webpage)
        uploader = m.group('uploader')
        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': title,
            'uploader': uploader
        }
        return [info]

def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [
        YoutubePlaylistIE(),
        YoutubeChannelIE(),
        YoutubeUserIE(),
        YoutubeSearchIE(),
        YoutubeIE(),
        MetacafeIE(),
        DailymotionIE(),
        GoogleSearchIE(),
        PhotobucketIE(),
        YahooIE(),
        YahooSearchIE(),
        DepositFilesIE(),
        FacebookIE(),
        BlipTVUserIE(),
        BlipTVIE(),
        VimeoIE(),
        MyVideoIE(),
        ComedyCentralIE(),
        EscapistIE(),
        CollegeHumorIE(),
        XVideosIE(),
        SoundcloudIE(),
        InfoQIE(),
        MixcloudIE(),
        StanfordOpenClassroomIE(),
        MTVIE(),
        YoukuIE(),
        XNXXIE(),
        GooglePlusIE(),
        ArteTvIE(),
        NBAIE(),
        JustinTVIE(),
        FunnyOrDieIE(),
        TweetReelIE(),
        SteamIE(),
        UstreamIE(),
        GenericIE()
    ]
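The ordering of gen_extractors() is significant: the downloader hands each URL to the first extractor whose suitable() accepts it, which is why the new SteamIE and UstreamIE entries sit before the catch-all GenericIE(). A minimal dispatch sketch (not part of this commit; it assumes the youtube_dl package is importable and uses a hypothetical URL):

from youtube_dl.InfoExtractors import gen_extractors

def pick_extractor(url):
    # First match wins, mirroring how the downloader tries extractors in order.
    for ie in gen_extractors():
        if ie.suitable(url):
            return ie
    return None

print(pick_extractor('http://www.ustream.tv/recorded/20274954').IE_NAME)  # u'ustream'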

youtube_dl/PostProcessor.py
View File

@ -1,198 +1,204 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import os
import subprocess
import sys
import time

from .utils import *

class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with
    an initial argument and then with the returned value of the previous
    PostProcessor.

    The chain will be stopped if one of them ever returns None or the end
    of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.
    """

    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        When this method returns None, the postprocessing chain is
        stopped. However, this method may return an information
        dictionary that will be passed to the next postprocessing
        object in the chain. It can be the one it received after
        changing some fields.

        In addition, this method may raise a PostProcessingError
        exception that will be taken into account by the downloader
        it was called from.
        """
        return information # by default, do nothing

class AudioConversionError(BaseException):
    def __init__(self, message):
        self.message = message

class FFmpegExtractAudioPP(PostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False, nopostoverwrites=False):
        PostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._keepvideo = keepvideo
        self._nopostoverwrites = nopostoverwrites
        self._exes = self.detect_executables()

    @staticmethod
    def detect_executables():
        def executable(exe):
            try:
                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            except OSError:
                return False
            return exe
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((program, executable(program)) for program in programs)

    def get_audio_codec(self, path):
        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        audio_codec = None
        for line in output.decode('ascii', 'ignore').split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
               + acodec_opts + more_opts +
               ['--', encodeFilename(out_path)])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            msg = stderr.strip().split('\n')[-1]
            raise AudioConversionError(msg)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
            return None

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if self._preferredcodec == 'm4a' and filecodec == 'aac':
                # Lossless, but in another container
                acodec = 'copy'
                extension = self._preferredcodec
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                    else:
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                if int(self._preferredquality) < 10:
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                else:
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension
        try:
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
            else:
                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
                self.run_ffmpeg(path, new_path, acodec, more_opts)
        except:
            etype, e, tb = sys.exc_info()
            if isinstance(e, AudioConversionError):
                self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
            else:
                self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
            return None

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except:
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

        if not self._keepvideo:
            try:
                os.remove(encodeFilename(path))
            except (IOError, OSError):
                self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')

        information['filepath'] = new_path
        return information

youtube_dl/__init__.py
View File

@ -2,33 +2,30 @@
# -*- coding: utf-8 -*-

from __future__ import with_statement
from __future__ import absolute_import

__authors__  = (
    'Ricardo Garcia Gonzalez',
    'Danny Colligan',
    'Benjamin Johnson',
    'Vasyl\' Vavrychuk',
    'Witold Baryluk',
    'Paweł Paprota',
    'Gergely Imreh',
    'Rogério Brito',
    'Philipp Hagemeister',
    'Sören Schulze',
    'Kevin Ngo',
    'Ori Avtalion',
    'shizeeg',
    'Filippo Valsorda',
    'Christian Albrecht',
    'Dave Vasilevsky',
    'Jaime Marquínez Ferrándiz',
    )

__license__ = 'Public Domain'

import getpass
import optparse
import os

@ -37,530 +34,459 @@
import shlex
import socket
import subprocess
import sys
import warnings
import platform

from .utils import *
from .update import update_self
from .version import __version__
from .FileDownloader import *
from .InfoExtractors import gen_extractors
from .PostProcessor import *

def parseOpts():
    def _readOptions(filename_bytes):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return [] # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value(): opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _find_term_columns():
        columns = os.environ.get('COLUMNS', None)
        if columns:
            return int(columns)

        try:
            sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = sp.communicate()
            return int(out.split()[1])
        except:
            pass
        return None

    max_width = 80
    max_help_position = 80

    # No need to wrap help messages if we're on a wide console
    columns = _find_term_columns()
    if columns: max_width = columns

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version'          : __version__,
        'formatter'        : fmt,
        'usage'            : '%prog [options] url [url...]',
        'conflict_handler' : 'resolve',
    }

    parser = optparse.OptionParser(**kw)

    # option groups
    general        = optparse.OptionGroup(parser, 'General Options')
    selection      = optparse.OptionGroup(parser, 'Video Selection')
    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')

    general.add_option('-h', '--help',
            action='help', help='print this help text and exit')
    general.add_option('-v', '--version',
            action='version', help='print program version and exit')
    general.add_option('-U', '--update',
            action='store_true', dest='update_self', help='update this program to latest version')
    general.add_option('-i', '--ignore-errors',
            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
    general.add_option('-r', '--rate-limit',
            dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
    general.add_option('-R', '--retries',
            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
    general.add_option('--buffer-size',
            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
    general.add_option('--no-resize-buffer',
            action='store_true', dest='noresizebuffer',
            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
    general.add_option('--dump-user-agent',
            action='store_true', dest='dump_user_agent',
            help='display the current browser identification', default=False)
    general.add_option('--user-agent',
            dest='user_agent', help='specify a custom user agent', metavar='UA')
    general.add_option('--list-extractors',
            action='store_true', dest='list_extractors',
            help='List all supported extractors and the URLs they would handle', default=False)
    general.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)

    selection.add_option('--playlist-start',
            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
    selection.add_option('--playlist-end',
            dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX', help='download only matching titles (regex or caseless sub-string)')
    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX', help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)

    authentication.add_option('-u', '--username',
            dest='username', metavar='USERNAME', help='account username')
    authentication.add_option('-p', '--password',
            dest='password', metavar='PASSWORD', help='account password')
    authentication.add_option('-n', '--netrc',
            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)

    video_format.add_option('-f', '--format',
            action='store', dest='format', metavar='FORMAT', help='video format code')
    video_format.add_option('--all-formats',
            action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
    video_format.add_option('--max-quality',
            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
    video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
    video_format.add_option('--write-srt',
            action='store_true', dest='writesubtitles',
            help='write video closed captions to a .srt file (currently youtube only)', default=False)
    video_format.add_option('--srt-lang',
            action='store', dest='subtitleslang', metavar='LANG',
            help='language of the closed captions to download (optional) use IETF language tags like \'en\'')

    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option('-s', '--simulate',
            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
            action='store_true', dest='skip_download', help='do not download the video', default=False)
    verbosity.add_option('-g', '--get-url',
            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
    verbosity.add_option('-e', '--get-title',
            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
    verbosity.add_option('--get-thumbnail',
            action='store_true', dest='getthumbnail',
            help='simulate, quiet but print thumbnail URL', default=False)
    verbosity.add_option('--get-description',
            action='store_true', dest='getdescription',
            help='simulate, quiet but print video description', default=False)
    verbosity.add_option('--get-filename',
            action='store_true', dest='getfilename',
            help='simulate, quiet but print output filename', default=False)
    verbosity.add_option('--get-format',
            action='store_true', dest='getformat',
            help='simulate, quiet but print output format', default=False)
    verbosity.add_option('--no-progress',
            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
            action='store_true', dest='consoletitle',
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)

    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='use title in file name', default=False)
    filesystem.add_option('--id',
            action='store_true', dest='useid', help='use video ID in file name', default=False)
    filesystem.add_option('-l', '--literal',
            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-A', '--auto-number',
            action='store_true', dest='autonumber',
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-a', '--batch-file',
            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('-w', '--no-overwrites',
            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
            action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
    filesystem.add_option('--no-continue',
            action='store_false', dest='continue_dl',
            help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--cookies',
            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option('--no-part',
            action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
            action='store_false', dest='updatetime',
            help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
            action='store_true', dest='writedescription',
            help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
            action='store_true', dest='writeinfojson',
            help='write video metadata to a .info.json file', default=False)

    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
            help='do not overwrite post-processed files; the post-processed files are overwritten by default')

    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(video_format)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
    if xdg_config_home:
        userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
    else:
        userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
    argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
    opts, args = parser.parse_args(argv)

    return parser, opts, args

def _real_main():
    parser, opts, args = parseOpts()

    # Open appropriate CookieJar
    if opts.cookiefile is None:
        jar = compat_cookiejar.CookieJar()
    else:
        try:
            jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
            if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
                jar.load()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to open cookie file')

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Dump user agent
    if opts.dump_user_agent:
        print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batchurls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = open(opts.batchfile, 'r')
            batchurls = batchfd.readlines()
            batchurls = [x.strip() for x in batchurls]
            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batchurls + args
    all_urls = [url.strip() for url in all_urls]

    # General configuration
    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
    proxy_handler = compat_urllib_request.ProxyHandler()
    opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
    compat_urllib_request.install_opener(opener)
    socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

    extractors = gen_extractors()

    if opts.list_extractors:
        for ie in extractors:
            print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            all_urls = [url for url in all_urls if url not in matchedUrls]
            for mu in matchedUrls:
                print(u'  ' + mu)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u'account username missing')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = getpass.getpass(u'Type account password and press return:')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError) as err:
            parser.error(u'invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    try:
        opts.playliststart = int(opts.playliststart)
        if opts.playliststart <= 0:
            raise ValueError(u'Playlist start must be positive')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist start number specified')
    try:
        opts.playlistend = int(opts.playlistend)
        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
            raise ValueError(u'Playlist end must be greater than playlist start')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist end number specified')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
            or (opts.useid and u'%(id)s.%(ext)s')
            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
            or u'%(id)s.%(ext)s')
    # File downloader
    fd = FileDownloader({
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'simulate': opts.simulate,
        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeinfojson': opts.writeinfojson,
        'writesubtitles': opts.writesubtitles,
        'subtitleslang': opts.subtitleslang,
        'matchtitle': opts.matchtitle,
        'rejecttitle': opts.rejecttitle,
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'test': opts.test,
        })

    if opts.verbose:
        fd.to_screen(u'[debug] youtube-dl version ' + __version__)
        try:
            sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                  cwd=os.path.dirname(os.path.abspath(__file__)))
            out, err = sp.communicate()
            out = out.decode().strip()
            if re.match('[0-9a-f]+', out):
                fd.to_screen(u'[debug] Git HEAD: ' + out)
        except:
            pass
        fd.to_screen(u'[debug] Python version %s - %s' % (platform.python_version(), platform.platform()))
        fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))

    for extractor in extractors:
        fd.add_info_extractor(extractor)

    # PostProcessors
    if opts.extractaudio:
        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo, nopostoverwrites=opts.nopostoverwrites))

    # Update version
    if opts.update_self:
        update_self(fd.to_screen, opts.verbose, sys.argv[0])

    # Maybe do nothing
    if len(all_urls) < 1:
        if not opts.update_self:
            parser.error(u'you must provide at least one URL')
        else:
            sys.exit()

    try:
        retcode = fd.download(all_urls)
    except MaxDownloadsReached:
        fd.to_screen(u'--max-download limit reached, aborting.')
        retcode = 101

    # Dump cookie jar if requested
    if opts.cookiefile is not None:
        try:
            jar.save()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to save cookie jar')

    sys.exit(retcode)

def main():
    try:
        _real_main()
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')
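_readOptions() above splits each line of /etc/youtube-dl.conf and the user config with shlex, so quoting and trailing # comments behave as they would in a shell. A small illustration of that parsing (assumed file contents, not from this commit):

import shlex

line = '-o "%(uploader)s/%(title)s-%(id)s.%(ext)s"  # one directory per uploader'
print(shlex.split(line, comments=True))
# ['-o', '%(uploader)s/%(title)s-%(id)s.%(ext)s']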

youtube_dl/__main__.py
View File

@ -1,7 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ python -m youtube_dl (2.7+)

import sys

if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py
    import os.path
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()

160
youtube_dl/update.py Normal file
View File

@ -0,0 +1,160 @@
import json
import traceback
import hashlib
from zipimport import zipimporter
from .utils import *
from .version import __version__
def rsa_verify(message, signature, key):
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True
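
# A minimal usage sketch, not part of the module proper: the toy key and hex
# signature below are hypothetical placeholders, far too small for real use.
# update_self calls rsa_verify with the canonically re-serialized
# versions.json payload and the embedded 1024-bit public key.
def _rsa_verify_example():
    payload = json.dumps({'latest': '2013.01.02'}, sort_keys=True).encode('utf-8')
    toy_key = (0x9d60ee4d, 65537)  # hypothetical (n, e) pair
    return rsa_verify(payload, 'deadbeef', toy_key)  # False: not a valid signature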
def update_self(to_screen, verbose, filename):
    """Update the program file with the latest version from the repository"""

    UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
        to_screen(u'It looks like you installed youtube-dl with pip, setup.py or a tarball. Please use that to update.')
        return

    # Check if there is a new version
    try:
        newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
    except:
        if verbose: to_screen(compat_str(traceback.format_exc()))
        to_screen(u'ERROR: can\'t find the current version. Please try again later.')
        return
    if newversion == __version__:
        to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
        return

    # Download and check versions info
    try:
        versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except:
        if verbose: to_screen(compat_str(traceback.format_exc()))
        to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
        return
    if not 'signature' in versions_info:
        to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
        return

    to_screen(u'Updating to version ' + versions_info['latest'] + '...')
    version = versions_info['versions'][versions_info['latest']]
    if version.get('notes'):
        to_screen(u'PLEASE NOTE:')
        for note in version['notes']:
            to_screen(note)

    if not os.access(filename, os.W_OK):
        to_screen(u'ERROR: no write permissions on %s' % filename)
        return
    # Py2EXE
    if hasattr(sys, "frozen"):
        exe = os.path.abspath(filename)
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen(u'ERROR: no write permissions on %s' % directory)
            return

        try:
            urlh = compat_urllib_request.urlopen(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to write the new version')
            return

        try:
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            b = open(bat, 'w')
            b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" % (exe, exe, bat))
            b.close()

            os.startfile(bat)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to overwrite current version')
            return
    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = compat_urllib_request.urlopen(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to overwrite current version')
            return

    to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
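
# A minimal sketch of the integrity check update_self applies in both
# branches above; the payload bytes and expected digest are hypothetical
# placeholders, not real release data.
def _hash_check_example():
    newcontent = b'downloaded payload'
    expected_hash = '0' * 64  # hypothetical digest taken from versions.json
    return hashlib.sha256(newcontent).hexdigest() == expected_hash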
youtube_dl/utils.py
@@ -2,363 +2,542 @@
# -*- coding: utf-8 -*-

import gzip
import io
import json
import locale
import os
import re
import sys
import traceback
import zlib
import email.utils

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

try:
    import urllib.error as compat_urllib_error
except ImportError: # Python 2
    import urllib2 as compat_urllib_error

try:
    import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
    import urllib as compat_urllib_parse

try:
    from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
    from urlparse import urlparse as compat_urllib_parse_urlparse

try:
    import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
    import cookielib as compat_cookiejar

try:
    import html.entities as compat_html_entities
except ImportError: # Python 2
    import htmlentitydefs as compat_html_entities

try:
    import html.parser as compat_html_parser
except ImportError: # Python 2
    import HTMLParser as compat_html_parser

try:
    import http.client as compat_http_client
except ImportError: # Python 2
    import httplib as compat_http_client

try:
    from subprocess import DEVNULL
    compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _unquote(string, encoding='utf-8', errors='replace'):
        if string == '':
            return string
        res = string.split('%')
        if len(res) == 1:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string

    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, unicode
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError("bad query field: %r" % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = _unquote(name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = _unquote(value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result

try:
    compat_str = unicode # Python 2
except NameError:
    compat_str = str

try:
    compat_chr = unichr # Python 2
except NameError:
    compat_chr = chr
std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        u'TEST'.encode(pref)
    except:
        pref = 'UTF-8'

    return pref
if sys.version_info < (3,0):
    def compat_print(s):
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        assert type(s) == type(u'')
        print(s)

# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3,0):
    def write_json_file(obj, fn):
        with open(fn, 'wb') as f:
            json.dump(obj, f)
else:
    def write_json_file(obj, fn):
        with open(fn, 'w', encoding='utf-8') as f:
            json.dump(obj, f)
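
# Usage sketch (the file name is an illustrative placeholder): the same call
# works on both Python versions and produces a UTF-8 JSON file either way.
#   write_json_file({'id': u'abc123', 'title': u'Example'}, 'example.info.json')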
def htmlentity_transform(matchobj):
    """Transforms an HTML entity to a character.

    This function receives a match object and is intended to be used with
    the re.sub() function.
    """
    entity = matchobj.group(1)

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(u'(?u)#(x?\\d+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith(u'x'):
            base = 16
            numstr = u'0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return (u'&%s;' % entity)

compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class AttrParser(compat_html_parser.HTMLParser):
    """Modified HTMLParser that isolates a tag with the specified attribute"""
    def __init__(self, attribute, value):
        self.attribute = attribute
        self.value = value
        self.result = None
        self.started = False
        self.depth = {}
        self.html = None
        self.watch_startpos = False
        self.error_count = 0
        compat_html_parser.HTMLParser.__init__(self)

    def error(self, message):
        if self.error_count > 10 or self.started:
            raise compat_html_parser.HTMLParseError(message, self.getpos())
        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
        self.error_count += 1
        self.goahead(1)

    def loads(self, html):
        self.html = html
        self.feed(html)
        self.close()

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.started:
            self.find_startpos(None)
        if self.attribute in attrs and attrs[self.attribute] == self.value:
            self.result = [tag]
            self.started = True
            self.watch_startpos = True
        if self.started:
            if not tag in self.depth: self.depth[tag] = 0
            self.depth[tag] += 1

    def handle_endtag(self, tag):
        if self.started:
            if tag in self.depth: self.depth[tag] -= 1
            if self.depth[self.result[0]] == 0:
                self.started = False
                self.result.append(self.getpos())

    def find_startpos(self, x):
        """Needed to put the start position of the result (self.result[1])
        after the opening tag with the requested id"""
        if self.watch_startpos:
            self.watch_startpos = False
            self.result.append(self.getpos())
    handle_entityref = handle_charref = handle_data = handle_comment = \
        handle_decl = handle_pi = unknown_decl = find_startpos

    def get_result(self):
        if self.result is None:
            return None
        if len(self.result) != 3:
            return None
        lines = self.html.split('\n')
        lines = lines[self.result[1][0]-1:self.result[2][0]]
        lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        lines[-1] = lines[-1][:self.result[2][1]]
        return '\n'.join(lines).strip()
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)

def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    parser = AttrParser(attribute, value)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        pass
    return parser.get_result()
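
# A small sketch on a made-up snippet: the parser isolates the tag carrying
# the requested attribute and returns its inner markup.
#   get_element_by_id(u'player', u'<html><div id="player"><p>Hi</p></div></html>')
#   -> u'<p>Hi</p>'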
def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html
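
# Illustrative input/output, combining the <br/> rewrite, tag stripping and
# entity replacement steps above:
#   clean_html(u'Line one<br/>Line &amp; two') -> u'Line one\nLine & two'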
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # In case of error, try to remove win32 forbidden chars
        filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)

        # An exception here should be caught in the caller
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
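
# Illustrative call; an unparseable string yields None.
#   timeconvert(u'Wed, 02 Jan 2013 12:00:00 +0000') -> 1357128000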
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    result = u''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if not result:
            result = '_'
    return result
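
# Illustrative behavior of the two modes on a made-up title:
#   sanitize_filename(u'Artist: "Song" / Demo?')            -> u"Artist - 'Song' _ Demo"
#   sanitize_filename(u'Artist: "Song"', restricted=True)   -> u'Artist_-_Song'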
def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
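
# e.g. orderedSet([1, 2, 1, 3, 2]) -> [1, 2, 3]; the first occurrence wins.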
def unescapeHTML(s):
    """
    @param s a string
    """
    assert type(s) == type(u'')

    result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
    return result
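
# Named, decimal and hex entities are all routed through
# htmlentity_transform, e.g.:
#   unescapeHTML(u'Ben &amp; Jerry&#39;s') -> u"Ben & Jerry's"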
def encodeFilename(s):
    """
    @param s The name of the file
    """

    assert type(s) == type(u'')

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
        return s
    else:
        return s.encode(sys.getfilesystemencoding(), 'ignore')


class ExtractorError(Exception):
    """Error during info extraction."""
    def __init__(self, msg, tb=None):
        """ tb, if given, is the original traceback (so that it can be printed out). """
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb

    def format_traceback(self):
        if self.traceback is None:
            return None
        return u''.join(traceback.format_tb(self.traceback))
class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """
    pass


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    pass


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """
    # Both in bytes
    downloaded = None
    expected = None

    def __init__(self, downloaded, expected):
        self.downloaded = downloaded
        self.expected = expected
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        for h in std_headers:
            if h in req.headers:
                del req.headers[h]
            req.add_header(h, std_headers[h])
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']
        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r')
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp

    https_request = http_request
    https_response = http_response
2 youtube_dl/version.py Normal file
@@ -0,0 +1,2 @@

__version__ = '2013.01.02'