From 0b40544f290de329679aebf06e98056e707dd7e1 Mon Sep 17 00:00:00 2001
From: Dave Vasilevsky
Date: Sun, 16 Dec 2012 03:50:41 -0500
Subject: [PATCH 1/3] Preliminary support for twitch.tv and justin.tv

---
 youtube_dl/InfoExtractors.py | 56 ++++++++++++++++++++++++++++++++++++
 youtube_dl/__init__.py       |  1 +
 2 files changed, 57 insertions(+)
 mode change 100644 => 100755 youtube_dl/InfoExtractors.py

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
old mode 100644
new mode 100755
index 3a6e84ebb..c5ab8907b
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -3634,3 +3634,59 @@ class NBAIE(InfoExtractor):
             'description': _findProp(r'<div class="description">(.*?)</h1>'),
         }
         return [info]
+
+class JustinTVIE(InfoExtractor):
+    """Information extractor for justin.tv and twitch.tv"""
+
+#    _VALID_URL = r"""^(?:http(?:s?)://)?www\.(?:justin|twitch)\.tv/
+#                     ([^/]+)(?:/b/([^/]+))?/?(?:#.*)?$"""
+    _VALID_URL = r'^http://www.twitch.tv/(.*)$'
+    IE_NAME = u'justin.tv'
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        api = 'http://api.justin.tv'
+        video_id = mobj.group(mobj.lastindex)
+        if mobj.lastindex == 1:
+            api += '/channel/archives/%s.json?limit=100'
+        else:
+            api += '/clip/show/%s.json'
+        api = api % (video_id,)
+
+        self.report_extraction(video_id)
+        # TODO: multiple pages
+        # TODO: One broadcast may be split into multiple videos. The key
+        # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+        # starts at 1 and increases. Can we treat all parts as one video?
+        try:
+            urlh = compat_urllib_request.urlopen(api)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+            return
+
+        response = json.loads(webpage)
+        info = []
+        for clip in response:
+            video_url = clip['video_file_url']
+            if video_url:
+                video_extension = os.path.splitext(video_url)[1][1:]
+                video_date = re.sub('-', '', clip['created_on'][:10])
+                info.append({
+                    'id': clip['id'],
+                    'url': video_url,
+                    'title': clip['title'],
+                    'uploader': clip['user_id'] or clip['channel_id'],
+                    'upload_date': video_date,
+                    'ext': video_extension,
+                })
+        return info
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index e86ac49b3..f76d31d01 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -399,6 +399,7 @@ def gen_extractors():
         GooglePlusIE(),
         ArteTvIE(),
         NBAIE(),
+        JustinTVIE(),
         GenericIE()
     ]
 
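
The first patch drives Justin.tv's public REST API: /channel/archives/<channel>.json returns a JSON array of clip objects, and /clip/show/<id>.json returns a single clip. For reference, a minimal standalone sketch of the same request (the URL layout and JSON field names are taken from the patch above; the helper name, the import shim, and the filtering are illustrative only):

    import json

    try:
        from urllib.request import urlopen   # modern Python
    except ImportError:
        from urllib2 import urlopen          # Python 2, as in the patch's era

    def fetch_channel_archives(channel, limit=100):
        """Fetch one page of a channel's past broadcasts as parsed JSON."""
        url = 'http://api.justin.tv/channel/archives/%s.json?limit=%d' % (channel, limit)
        clips = json.loads(urlopen(url).read().decode('utf-8', 'ignore'))
        # Per the patch, each clip carries 'id', 'title', 'created_on',
        # 'video_file_url' and 'user_id'/'channel_id'; entries without a
        # 'video_file_url' are not downloadable and get skipped.
        return [c for c in clips if c.get('video_file_url')]

Note that this first version hard-codes ?limit=100, so channels with more than 100 archived videos are silently truncated; the next patch addresses exactly that.
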
From 2ab1c5ed1a5bf3f63b3e7e6f09d59e431cbe783c Mon Sep 17 00:00:00 2001
From: Dave Vasilevsky
Date: Sun, 16 Dec 2012 04:05:39 -0500
Subject: [PATCH 2/3] Support more than 100 videos for justin.tv

---
 youtube_dl/InfoExtractors.py | 63 ++++++++++++++++++++++++------------
 1 file changed, 42 insertions(+), 21 deletions(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index c5ab8907b..e5118a717 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -3637,37 +3637,27 @@ class NBAIE(InfoExtractor):
 
 class JustinTVIE(InfoExtractor):
     """Information extractor for justin.tv and twitch.tv"""
-
+    # TODO: One broadcast may be split into multiple videos. The key
+    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+    # starts at 1 and increases. Can we treat all parts as one video?
+
 #    _VALID_URL = r"""^(?:http(?:s?)://)?www\.(?:justin|twitch)\.tv/
 #                     ([^/]+)(?:/b/([^/]+))?/?(?:#.*)?$"""
     _VALID_URL = r'^http://www.twitch.tv/(.*)$'
     IE_NAME = u'justin.tv'
+
+    _max_justin_results = 1000
+    _justin_page_limit = 100
 
     def report_extraction(self, file_id):
         """Report information extraction."""
         self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-            return
-
-        api = 'http://api.justin.tv'
-        video_id = mobj.group(mobj.lastindex)
-        if mobj.lastindex == 1:
-            api += '/channel/archives/%s.json?limit=100'
-        else:
-            api += '/clip/show/%s.json'
-        api = api % (video_id,)
-
-        self.report_extraction(video_id)
-        # TODO: multiple pages
-        # TODO: One broadcast may be split into multiple videos. The key
-        # 'broadcast_id' is the same for all parts, and 'broadcast_part'
-        # starts at 1 and increases. Can we treat all parts as one video?
+    # Return count of items, list of *valid* items
+    def _parse_page(self, url):
+        print url
         try:
-            urlh = compat_urllib_request.urlopen(api)
+            urlh = compat_urllib_request.urlopen(url)
             webpage_bytes = urlh.read()
             webpage = webpage_bytes.decode('utf-8', 'ignore')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -3689,4 +3679,35 @@ class JustinTVIE(InfoExtractor):
                     'upload_date': video_date,
                     'ext': video_extension,
                 })
+        print len(response)
+        return (len(response), info)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        api = 'http://api.justin.tv'
+        video_id = mobj.group(mobj.lastindex)
+        paged = False
+        if mobj.lastindex == 1:
+            paged = True
+            api += '/channel/archives/%s.json'
+        else:
+            api += '/clip/show/%s.json'
+        api = api % (video_id,)
+
+        self.report_extraction(video_id)
+
+        info = []
+        offset = 0
+        limit = self._justin_page_limit
+        while offset < self._max_justin_results:
+            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
+            page_count, page_info = self._parse_page(page_url)
+            info.extend(page_info)
+            if not paged or page_count != limit:
+                break
+            offset += limit
         return info
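
The paging scheme added here is a plain offset/limit walk over the archives endpoint. One subtlety: _parse_page returns the raw response length alongside the filtered list, so clips without a 'video_file_url' still count toward the page size and cannot terminate the loop early; iteration stops when a page comes back short or when the _max_justin_results cap is reached. The same loop in isolation (a sketch; fetch_page stands in for _parse_page and is assumed to return the raw item count plus the usable items):

    def paginate(fetch_page, limit=100, max_results=1000):
        """Walk an offset/limit API until a short page signals the end."""
        items = []
        offset = 0
        while offset < max_results:
            page_count, page_items = fetch_page(offset, limit)
            items.extend(page_items)
            if page_count != limit:   # short (or empty) page: nothing left
                break
            offset += limit
        return items

The stray print statements are debugging leftovers; the follow-up patch removes them.
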
From 4096b609484925321c136737c36f17339fa2391c Mon Sep 17 00:00:00 2001
From: Dave Vasilevsky
Date: Sun, 16 Dec 2012 04:45:46 -0500
Subject: [PATCH 3/3] Misc justin.tv fixes

---
 youtube_dl/InfoExtractors.py | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index e5118a717..418251ec0 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -3641,21 +3641,22 @@ class JustinTVIE(InfoExtractor):
     # 'broadcast_id' is the same for all parts, and 'broadcast_part'
     # starts at 1 and increases. Can we treat all parts as one video?
 
-#    _VALID_URL = r"""^(?:http(?:s?)://)?www\.(?:justin|twitch)\.tv/
-#                     ([^/]+)(?:/b/([^/]+))?/?(?:#.*)?$"""
-    _VALID_URL = r'^http://www.twitch.tv/(.*)$'
+    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
+                     ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""
+    _JUSTIN_PAGE_LIMIT = 100
     IE_NAME = u'justin.tv'
-
-    _max_justin_results = 1000
-    _justin_page_limit = 100
 
     def report_extraction(self, file_id):
         """Report information extraction."""
         self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
 
+    def report_download_page(self, channel, offset):
+        """Report attempt to download a single page of videos."""
+        self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' %
+                (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
+
     # Return count of items, list of *valid* items
     def _parse_page(self, url):
-        print url
         try:
             urlh = compat_urllib_request.urlopen(url)
             webpage_bytes = urlh.read()
@@ -3675,11 +3676,10 @@ class JustinTVIE(InfoExtractor):
                     'id': clip['id'],
                     'url': video_url,
                     'title': clip['title'],
-                    'uploader': clip['user_id'] or clip['channel_id'],
+                    'uploader': clip.get('user_id', clip.get('channel_id')),
                     'upload_date': video_date,
                     'ext': video_extension,
                 })
-        print len(response)
         return (len(response), info)
 
     def _real_extract(self, url):
@@ -3702,8 +3702,10 @@ class JustinTVIE(InfoExtractor):
 
         info = []
         offset = 0
-        limit = self._justin_page_limit
-        while offset < self._max_justin_results:
+        limit = self._JUSTIN_PAGE_LIMIT
+        while True:
+            if paged:
+                self.report_download_page(video_id, offset)
             page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
             page_count, page_info = self._parse_page(page_url)
             info.extend(page_info)
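
With the last patch the commented-out regex finally replaces the twitch-only placeholder: (?x) turns on re.VERBOSE so the pattern can span two lines, group 1 captures the channel name, and the optional group 2 captures a broadcast id from /b/<id> URLs. That is what lets _real_extract dispatch on mobj.lastindex: one matched group means a paged channel-archives query, two mean a single clip/show lookup. A quick illustration with made-up URLs (the pattern is the one from the patch; the test values are invented):

    import re

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
                     ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""

    for url in ('http://www.twitch.tv/somechannel',               # whole channel
                'http://www.justin.tv/somechannel/b/301999999'):  # one broadcast
        m = re.match(_VALID_URL, url)
        print(m.groups(), m.lastindex)
    # -> ('somechannel', None) 1         (paged archives API)
    # -> ('somechannel', '301999999') 2  (single clip/show API)

Note also the uploader fix: clip.get('user_id', clip.get('channel_id')) tolerates a clip with no 'user_id' key at all, where the old clip['user_id'] or clip['channel_id'] raised KeyError; the trade-off is that a 'user_id' that is present but null now yields None instead of falling back to the channel id.
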